inputs (string, lengths 312-52k) | targets (string, lengths 1-3.1k, ⌀ = null allowed) | block_type (string, 11 distinct values) | scenario (string, 7 distinct values) |
---|---|---|---|
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
<fim_suffix>
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>term_scores = [] | term_scores = [] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
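The first row above centers on `parse_min_should_match`, which implements Solr's `mm` grammar (plain integers, negative integers, percentages, and conditional `n<spec` forms). Below is a minimal sketch of how those specs resolve, assuming the solr.py module shown above is importable as `searcharray.solr`; the expected values are worked out by hand from the logic in the prompt, not taken from a test suite.

```python
from searcharray.solr import parse_min_should_match

# Plain integer: that many clauses must match (capped at the clause count).
assert parse_min_should_match(3, "2") == 2
# Negative integer: all but that many clauses must match.
assert parse_min_should_match(3, "-1") == 2
# Percentage: truncated toward zero after multiplying by the clause count.
assert parse_min_should_match(4, "75%") == 3
# Negative percentage: drop that (truncated) fraction of the clauses.
assert parse_min_should_match(5, "-25%") == 4
# Conditional "n<spec": the spec only kicks in when there are more than n clauses.
assert parse_min_should_match(2, "2<75%") == 2   # <= 2 clauses: all must match
assert parse_min_should_match(4, "2<75%") == 3   # > 2 clauses: 75% applies
```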
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
<fim_suffix>
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>return self.posns.docfreq(self.term_dict.get_term_id(token)) | return self.posns.docfreq(self.term_dict.get_term_id(token)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
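The second row exercises the `SearchArray` extension array (`index`, `termfreqs`, `docfreq`, `match`, `score`). A small usage sketch follows, assuming the package installs as `searcharray` with pandas and numpy available; the column name and sample documents are made up for illustration, and the commented outputs are worked out by hand from the whitespace tokenizer and postings logic shown above.

```python
import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["red shoes", "red red wine", "blue suede shoes"]})
# Index with the default whitespace tokenizer (ws_tokenizer above).
df["title_indexed"] = SearchArray.index(df["title"])
arr = df["title_indexed"].array

print(arr.termfreqs("red"))           # per-doc term frequency, expected [1 2 0]
print(arr.docfreq("red"))             # docs containing "red", expected 2
print(arr.match(["suede", "shoes"]))  # phrase mask, expected [False False True]
print(arr.score("shoes"))             # BM25 (default_bm25) score per doc
```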
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/middle_out.py
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
# searcharray/searcharray/phrase/middle_out.py
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
# searcharray/searcharray/phrase/middle_out.py
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
<fim_suffix>
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm) | _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
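The roaringish row above targets `intersect_rshift`, which intersects on the upper 46 bits (key plus payload MSBs) after shifting away the 18 payload LSBs. The sketch below hand-packs one (doc id, position) pair under the default layout implied by `DEFAULT_KEY_BITS` / `DEFAULT_PAYLOAD_*` (28 key bits, 18 payload-MSB bits, 18 one-hot LSB bits); it uses only numpy and never touches the library, so treat it as an illustration of the layout rather than the encoder itself.

```python
import numpy as np

def pack_one(doc_id: int, posn: int) -> np.uint64:
    """Pack one (doc_id, position) pair the way encode() above would for a single position."""
    key_part = np.uint64(doc_id) << np.uint64(36)       # top 28 bits: the key (doc id)
    msb_part = np.uint64(posn // 18) << np.uint64(18)   # middle 18 bits: position // 18
    lsb_part = np.uint64(1) << np.uint64(posn % 18)     # low 18 bits: one-hot position % 18
    return key_part | msb_part | lsb_part

word = pack_one(doc_id=5, posn=40)
assert int(word >> np.uint64(36)) == 5                         # key recovered
assert int((word >> np.uint64(18)) & np.uint64(0x3FFFF)) == 2  # 40 // 18
assert int(word & np.uint64(0x3FFFF)) == 1 << (40 % 18)        # one-hot bit 4 set

# intersect()/intersect_rshift() compare only this 46-bit header (key + MSB bucket):
assert int(word >> np.uint64(18)) == (5 << 18) | 2
```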
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/mat_set.py
def __str__(self):
as_str = [""]
for idx, (row, row_next) in enumerate(zip(self.rows, self.rows[1:])):
as_str.append(f"{idx}: {self.cols[row:row_next]}")
return "\n".join(as_str)
# searcharray/searcharray/indexing.py
def _lex_sort(terms_w_posns):
"""Sort terms, then doc_id, then posn."""
# Because docs / posns already sorted, we can just sort on terms
# Equivalent to np.lexsort(terms_w_posns[[::-1], :])
return np.argsort(terms_w_posns[0, :], kind='stable')
# searcharray/searcharray/phrase/middle_out.py
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
<fim_suffix>
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits | msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
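The fourth row's target is the decode step that extracts the payload MSBs; positions are then recovered as `bit_index + msbs * payload_lsb_bits`. Below is a complementary hand-worked sketch of that unpacking, under the same assumed 28/18/18 default layout and independent of the library.

```python
import numpy as np

def unpack_one(word: np.uint64):
    """Recover (doc_id, positions) from one packed word, mirroring decode() above."""
    key = int(word >> np.uint64(36))
    msbs = int((word >> np.uint64(18)) & np.uint64(0x3FFFF))
    lsbs = int(word & np.uint64(0x3FFFF))
    # Each set LSB bit is one position inside this 18-wide bucket.
    posns = [bit + msbs * 18 for bit in range(18) if lsbs & (1 << bit)]
    return key, posns

# Positions 38 and 40 of doc 5 share one word because 38 // 18 == 40 // 18 == 2.
packed = (
    (np.uint64(5) << np.uint64(36))
    | (np.uint64(2) << np.uint64(18))
    | np.uint64((1 << (38 % 18)) | (1 << (40 % 18)))
)
assert unpack_one(packed) == (5, [38, 40])
```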
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (zeroing docs where an earlier bigram did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
<fim_suffix>
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>return [np.array([], dtype=np.uint32)] | return [np.array([], dtype=np.uint32)] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
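For readers skimming the PosnBitArray code above: term frequencies are computed by popcounting the position payload bits of each encoded word and reducing those counts at document boundaries (_computed_term_freqs). A minimal standalone sketch of that idea in plain NumPy, with made-up payloads; np.bitwise_count (NumPy >= 2.0) stands in for the repo's bit_count64:

import numpy as np

# Toy encoded words: the low bits are position payloads, one or more words per doc
payloads = np.array([0b1011, 0b0001, 0b0110], dtype=np.uint64)
doc_ids = np.array([0, 0, 2], dtype=np.uint64)              # sorted doc key per word

bit_counts = np.bitwise_count(payloads).astype(np.int64)    # popcount -> [3, 1, 2]
starts = np.concatenate(([0], np.nonzero(np.diff(doc_ids))[0] + 1))
term_freqs = np.add.reduceat(bit_counts, starts)            # -> [4, 2] for docs [0, 2]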
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set (positions adjacent across the word boundary)
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (zeroing docs where an earlier bigram did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
<fim_suffix>
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>return decs | return decs | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
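One small idiom worth calling out from inner_bigram_freqs above: `-np.floor_divide(adjacents, -2)` is an integer ceiling division (negate the divisor, floor-divide, negate the result), so the in-place subtraction leaves floor(adjacents / 2) behind. A tiny illustration with made-up values:

import numpy as np

x = np.array([1, 2, 3, 4, 5], dtype=np.int64)
ceil_half = -np.floor_divide(x, -2)       # ceil(x / 2)  -> [1, 1, 2, 2, 3]
x_after = x - ceil_half                   # x - ceil(x/2) == floor(x/2) -> [0, 1, 1, 2, 2]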
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
<fim_suffix>
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score) | for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
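The _edismax_term_centric completion above scores term-centrically: for each query term take the best boosted field score, sum those per-term maxima per document, and zero out documents that fail the resolved mm threshold. A small NumPy sketch of that computation with made-up scores (two terms, three docs, qf of title^2 and body):

import numpy as np

# Hypothetical per-term, per-doc scores from each field's .score() call
title = np.array([[1.2, 0.0, 0.4],    # term "red"
                  [0.0, 0.9, 0.0]])   # term "shoes"
body = np.array([[0.3, 0.0, 0.0],
                 [0.8, 0.7, 0.0]])

per_term = np.maximum(title * 2.0, body)     # dismax across fields, title boosted 2x
meets_mm = (per_term > 0).sum(axis=0) >= 2   # mm resolved to 2: both terms must match
qf_scores = per_term.sum(axis=0)             # sum of per-term maxima
qf_scores[~meets_mm] = 0                     # -> [3.2, 0.0, 0.0]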
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/middle_out.py
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
# searcharray/searcharray/utils/row_viewable_matrix.py
def __init__(self, mat: SparseMatSet, rows: Optional[np.ndarray] = None, subset=False):
self.mat = mat
self.col_cache: Dict[int, np.ndarray] = {}
self.cols_cached: List[int] = []
if rows is None:
self.rows = np.arange(self.mat.shape[0])
elif isinstance(rows, numbers.Integral):
self.rows = np.array([rows])
else:
self.rows = rows
self.subset = subset
# searcharray/searcharray/phrase/posn_diffs.py
def stack_term_posns(term_posns: List[List[np.ndarray]], phrase_freqs: np.ndarray, width: int = 10):
# Pad for easy difference computation
keep_term_posns = []
# keep_mask = np.ones(len(self), dtype=bool)
for term_posn in term_posns:
this_term_posns = vstack_with_mask(term_posn, phrase_freqs, width=width)
keep_term_posns.append(this_term_posns)
return keep_term_posns
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
<fim_suffix>
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False | for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
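For concreteness, here is how the two parsers shown in the solr.py rows behave on a few inputs; the results follow directly from the code above, and the field names are invented:

from searcharray.solr import parse_field_boosts, parse_min_should_match

parse_field_boosts(["title^2", "body"])   # {'title': 2.0, 'body': None}
parse_min_should_match(4, "3")            # 3 - plain integer
parse_min_should_match(4, "-1")           # 3 - all but one clause
parse_min_should_match(4, "75%")          # 3 - int(4 * 0.75)
parse_min_should_match(2, "2<75%")        # 2 - at or below 2 clauses, require all
parse_min_should_match(5, "2<75%")        # 3 - above 2 clauses, apply the 75% rule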
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/similarity.py
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/phrase/middle_out.py
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
        # Only keep the count of the last bigram (zeroing docs where an earlier bigram did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
<fim_suffix>
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score) | for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
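Putting the pieces of the solr.py rows together, a hedged end-to-end sketch of calling edismax over a DataFrame with SearchArray columns. The edismax signature and the searcharray.postings import match the code above; SearchArray.index as the indexing entry point and the sample data are assumptions for illustration:

import pandas as pd
from searcharray.postings import SearchArray
from searcharray.solr import edismax

df = pd.DataFrame({
    "title": ["red running shoes", "blue suede shoes"],
    "body": ["lightweight red shoes for running", "classic blue shoes"],
})
df["title"] = SearchArray.index(df["title"])   # assumed constructor, not shown in this dump
df["body"] = SearchArray.index(df["body"])

scores, explain = edismax(df, q="red shoes",
                          qf=["title^2", "body"],
                          mm="2", pf=["body"])
ranked = df.iloc[scores.argsort()[::-1]]       # highest-scoring docs first
print(explain)                                 # textual description of the scored query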
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/middle_out.py
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
# searcharray/searcharray/utils/row_viewable_matrix.py
def __init__(self, mat: SparseMatSet, rows: Optional[np.ndarray] = None, subset=False):
self.mat = mat
self.col_cache: Dict[int, np.ndarray] = {}
self.cols_cached: List[int] = []
if rows is None:
self.rows = np.arange(self.mat.shape[0])
elif isinstance(rows, numbers.Integral):
self.rows = np.array([rows])
else:
self.rows = rows
self.subset = subset
# searcharray/searcharray/phrase/posn_diffs.py
def stack_term_posns(term_posns: List[List[np.ndarray]], phrase_freqs: np.ndarray, width: int = 10):
# Pad for easy difference computation
keep_term_posns = []
# keep_mask = np.ones(len(self), dtype=bool)
for term_posn in term_posns:
this_term_posns = vstack_with_mask(term_posn, phrase_freqs, width=width)
keep_term_posns.append(this_term_posns)
return keep_term_posns
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
<fim_suffix>
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and the explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1 | for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1 | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
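# The solr.py listing in the row above defines parse_min_should_match. A hedged
# usage sketch (the import path is assumed from the filename shown in that row);
# the expected values follow directly from the parsing rules in the function body:
from searcharray.solr import parse_min_should_match

assert parse_min_should_match(5, "3") == 3       # fixed clause count
assert parse_min_should_match(5, "-1") == 4      # all but one clause
assert parse_min_should_match(4, "75%") == 3     # percentage of clauses, rounded down
assert parse_min_should_match(2, "2<75%") == 2   # <= 2 clauses: all must match
assert parse_min_should_match(4, "2<75%") == 3   # > 2 clauses: 75% must match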
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
<fim_suffix>
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and the explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1]) | for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1]) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
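# The same listing defines parse_field_boosts for qf/pf style field lists. A small,
# hedged usage sketch (import path again assumed from the filename in the row above):
# a trailing "^<number>" becomes a float boost, a bare field name maps to None.
from searcharray.solr import parse_field_boosts

boosts = parse_field_boosts(["title^10", "body", "tags^0.5"])
assert boosts == {"title": 10.0, "body": None, "tags": 0.5}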
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
<fim_suffix>
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and the explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")") | for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")") | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
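# The hole filled in the row above is the term-centric (dismax) scoring loop:
# per query term take the best field score, sum those maxima per doc, then zero
# out docs matching fewer than min_should_match terms. A numpy-only sketch with
# made-up score matrices (rows = query terms, columns = docs), not searcharray calls:
import numpy as np

title_scores = np.array([[2.0, 0.0, 1.0],
                         [0.0, 0.0, 3.0]])   # already multiplied by any field boost
body_scores = np.array([[0.5, 1.5, 0.0],
                        [0.0, 2.0, 1.0]])

per_term_best = np.maximum(title_scores, body_scores)        # dismax across fields
min_should_match = 2
matches = np.sum(per_term_best > 0, axis=0) >= min_should_match
qf_scores = np.sum(per_term_best, axis=0)
qf_scores[~matches] = 0
print(qf_scores)  # [0.  3.5 4. ] -- doc 0 matched only one term, so it is zeroed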
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/mat_set.py
def __str__(self):
as_str = [""]
for idx, (row, row_next) in enumerate(zip(self.rows, self.rows[1:])):
as_str.append(f"{idx}: {self.cols[row:row_next]}")
return "\n".join(as_str)
# searcharray/searcharray/indexing.py
def _lex_sort(terms_w_posns):
"""Sort terms, then doc_id, then posn."""
# Because docs / posns already sorted, we can just sort on terms
# Equivalent to np.lexsort(terms_w_posns[[::-1], :])
return np.argsort(terms_w_posns[0, :], kind='stable')
# searcharray/searcharray/phrase/middle_out.py
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with the key stored in the MSBs, i.e. (with the default 28 key bits):
| 28 MSBs | 18 bits | 18 LSBs |
key | block msbs | payload bitmask
(a different number of key / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with the key stored in the MSBs, i.e. (with the default 28 key bits):
| 28 MSBs | 18 bits | 18 LSBs |
key | block msbs | payload bitmask
for later easy intersection of the 28+18 MSBs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Which payload block (header) each position falls into
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
<fim_suffix>
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn) | for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
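# The roaringish.py listing above packs (doc id, position) pairs into single 64-bit
# words: 28 key bits, an 18-bit block header (position // 18), and an 18-bit bitmask
# with bit (position % 18) set. A hedged worked example using the encoder from that
# listing (the import path is assumed from the filename shown in the row above):
import numpy as np
from searcharray.utils.roaringish import RoaringishEncoder

enc = RoaringishEncoder()
# position 40 in doc 5: block header 40 // 18 == 2, bit 40 % 18 == 4
encoded, _ = enc.encode(np.asarray([40], dtype=np.uint64),
                        keys=np.asarray([5], dtype=np.uint64))
word = int(encoded[0])
assert word >> 36 == 5              # doc id in the 28 most significant bits
assert (word >> 18) & 0x3FFFF == 2  # 18-bit header == position // 18
assert word & 0x3FFFF == 1 << 4     # bit (position % 18) set in the 18 LSBs
# the encoder's own accessors agree with the manual bit math
assert enc.keys(encoded)[0] == 5
assert enc.payload_msb(encoded)[0] == 2
assert enc.payload_lsb(encoded)[0] == 1 << 4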
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def stack_term_posns(term_posns: List[List[np.ndarray]], phrase_freqs: np.ndarray, width: int = 10):
# Pad for easy difference computation
keep_term_posns = []
# keep_mask = np.ones(len(self), dtype=bool)
for term_posn in term_posns:
this_term_posns = vstack_with_mask(term_posn, phrase_freqs, width=width)
keep_term_posns.append(this_term_posns)
return keep_term_posns
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
# searcharray/searcharray/utils/mat_set.py
def __getitem__(self, key):
# Iterate keys
beg_keys = self.rows[:-1][key]
end_keys = self.rows[1:][key]
if not isinstance(beg_keys, np.ndarray):
beg_keys = np.asarray([beg_keys])
end_keys = np.asarray([end_keys])
cols = [self.cols[beg:end] for beg, end in zip(beg_keys, end_keys)]
rows = [0] + [len(row) for row in cols]
rows = np.asarray(rows).flatten()
rows = np.cumsum(rows)
try:
cols = np.concatenate(cols)
except ValueError:
cols = np.asarray([], dtype=np.uint32)
return SparseMatSet(cols, rows)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Terms objects)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (Terms or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Cant set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have a positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, letting the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
<fim_suffix>
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>for curr_mask in masks:
mask = mask & curr_mask | for curr_mask in masks:
mask = mask & curr_mask | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
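# The postings.py listing above defines the SearchArray extension array. A short,
# hedged end-to-end sketch of the API it exposes (index a few strings with the
# default whitespace tokenizer, then term, phrase and BM25 queries); exact BM25
# values depend on the similarity defaults, so only matches are asserted:
import numpy as np
import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["the cat sat", "the dog sat", "cats and dogs"]})
df["title_indexed"] = SearchArray.index(df["title"])

assert list(df["title_indexed"].array.termfreqs("sat")) == [1, 1, 0]
assert list(df["title_indexed"].array.match(["the", "cat"])) == [True, False, False]
scores = df["title_indexed"].array.score("dog")
assert np.argmax(scores) == 1  # only the second doc contains "dog"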
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
# searcharray/searcharray/utils/roaringish.py
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
# searcharray/searcharray/utils/roaringish.py
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
<fim_suffix>
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0) | for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
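The completed loop in the row above chains bigram matches left to right inside compute_phrase_freqs: each step intersects the running left-hand positions with the next term's positions, and the count of the final bigram becomes the phrase frequency. A simplified sketch of the same chaining idea using plain Python position lists instead of the roaringish bit packing (helper names here are illustrative, not from the repo):
def bigram_count(lhs_posns, rhs_posns):
    """Return (count, matched rhs positions) where an rhs position directly follows an lhs one."""
    lhs_set = set(lhs_posns)
    matched = [p for p in rhs_posns if (p - 1) in lhs_set]
    return len(matched), matched
def phrase_freq_one_doc(term_posns):
    """Chain bigram matches across a phrase's terms for a single document."""
    lhs = term_posns[0]
    count = 0
    for rhs in term_posns[1:]:
        count, lhs = bigram_count(lhs, rhs)
        if count == 0:      # a broken bigram breaks the whole phrase
            return 0
    return count            # matches of the final bigram == phrase occurrences
# 'see' at 0 and 7, 'spot' at 1 and 8, 'run' at 2 and 5.
print(phrase_freq_one_doc([[0, 7], [1, 8], [2, 5]]))  # -> 1 (only the 0,1,2 chain completes)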
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
<fim_suffix>
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1]) | for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1]) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
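The completed loop in the row above is the core of parse_field_boosts: each field^boost entry is split on the caret and stored as {field: boost}, with None when no boost is given. A quick standalone illustration of that behavior (restating the function from the row so it runs on its own):
import re
from typing import List
def parse_field_boosts(field_lists: List[str]) -> dict:
    """Parse Solr-style qf/pf entries such as 'title^10' into {field: boost or None}."""
    if not field_lists:
        return {}
    out = {}
    carat_pattern = re.compile(r'\^')
    for field in field_lists:
        parts = carat_pattern.split(field)
        out[parts[0]] = None if len(parts) == 1 else float(parts[1])
    return out
print(parse_field_boosts(["title^10", "body"]))  # {'title': 10.0, 'body': None}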
<filename>searcharray/searcharray/term_dict.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
# searcharray/searcharray/postings.py
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
"""
import sys
class TermMissingError(KeyError):
def __init__(self, msg):
super().__init__(msg)
class TermDict:
def __init__(self):
self.term_to_ids = {}
self.id_to_terms = {}
def add_term(self, term):
if term in self.term_to_ids:
return self.term_to_ids[term]
term_id = len(self.term_to_ids)
self.term_to_ids[term] = term_id
self.id_to_terms[term_id] = term
return term_id
def copy(self):
new_dict = TermDict()
new_dict.term_to_ids = dict(self.term_to_ids)
new_dict.id_to_terms = dict(self.id_to_terms.copy())
return new_dict
def get_term_id(self, term):
<fim_suffix>
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
def compatible(self, other) -> bool:
# Intersect the terms in both dictionaries
terms_self = list(self.term_to_ids.keys())
terms_other = list(other.term_to_ids.keys())
shortest = min(len(terms_self), len(terms_other))
return terms_self[:shortest] == terms_other[:shortest]
# If the intersection is empty, the dictionaries are not compatible
def __len__(self):
return len(self.term_to_ids)
def __repr__(self):
return repr(self.term_to_ids)
@property
def nbytes(self):
bytes_used = sys.getsizeof(self.term_to_ids) + sys.getsizeof(self.id_to_terms)
return bytes_used
<fim_middle>try:
return self.term_to_ids[term] | try:
return self.term_to_ids[term] | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
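The completed TRY block in the row above is how TermDict.get_term_id converts a KeyError into the library's TermMissingError. A small usage sketch with a stripped-down dictionary (the class below is a stand-in written for illustration, not the repo's TermDict):
class TermMissingError(KeyError):
    """Raised when a term was never indexed."""
class TinyTermDict:
    """Minimal stand-in for TermDict: string terms mapped to integer ids."""
    def __init__(self):
        self.term_to_ids = {}
    def add_term(self, term):
        return self.term_to_ids.setdefault(term, len(self.term_to_ids))
    def get_term_id(self, term):
        try:
            return self.term_to_ids[term]
        except KeyError:
            raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
terms = TinyTermDict()
terms.add_term("spot")
print(terms.get_term_id("spot"))      # 0
try:
    terms.get_term_id("rex")
except TermMissingError as err:
    print("missing:", err)            # missing: 'Term rex not present in dictionary. Reindex to add.'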
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
<fim_suffix>
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>try:
return int(value) | try:
return int(value) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
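The completed TRY block in the row above turns malformed mm values into a descriptive ValueError inside checked_parse_int. For context, a few hedged examples of how the surrounding parse_min_should_match resolves common Solr mm specs; the import path is assumed from the row's filename (searcharray/solr.py), and the expected values follow a hand trace of the code shown in the row:
from searcharray.solr import parse_min_should_match  # import path assumed from the row's filename
# Plain integer: that many clauses must match (capped at the clause count).
assert parse_min_should_match(4, "2") == 2
# Negative integer: all but that many clauses must match.
assert parse_min_should_match(4, "-1") == 3
# Percentage: computed against the clause count and truncated.
assert parse_min_should_match(4, "75%") == 3
# Conditional "N<expr": expr only applies when there are more than N clauses.
assert parse_min_should_match(2, "2<75%") == 2   # few clauses, require all of them
assert parse_min_should_match(4, "2<75%") == 3   # more clauses, 75% truncated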
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/middle_out.py
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
<fim_suffix>
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs) | try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
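The completed TRY block in the row above is the fast path of phrase_freq: when the phrase has no repeated token and default slop, term ids are resolved and the bit-packed PosnBitArray.phrase_freqs does the counting, with a zero vector returned if any term is missing; otherwise control falls through to phrase_freq_every_diff. A hedged end-to-end sketch exercising both paths (the indexed strings are invented; the SearchArray.index and phrase_freq calls follow the class as shown in these rows):
from searcharray.postings import SearchArray  # import path as used in the rows
docs = ["see spot run", "run spot run", "spot sees rex"]
arr = SearchArray.index(docs)  # whitespace tokenizer by default
# Fast path: unique tokens, slop == 1 -> encoded bigram counting.
print(arr.phrase_freq(["see", "spot"]))         # expected: [1., 0., 0.]
# Repeated token ("run" appears twice) -> falls back to phrase_freq_every_diff.
print(arr.phrase_freq(["run", "spot", "run"]))  # expected: [0., 1., 0.]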
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/row_viewable_matrix.py
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
return self.copy_row_at(key)
else:
return self.slice(key)
# searcharray/searcharray/phrase/middle_out.py
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
# searcharray/searcharray/utils/mat_set.py
def __getitem__(self, key):
# Iterate keys
beg_keys = self.rows[:-1][key]
end_keys = self.rows[1:][key]
if not isinstance(beg_keys, np.ndarray):
beg_keys = np.asarray([beg_keys])
end_keys = np.asarray([end_keys])
cols = [self.cols[beg:end] for beg, end in zip(beg_keys, end_keys)]
rows = [0] + [len(row) for row in cols]
rows = np.asarray(rows).flatten()
rows = np.cumsum(rows)
try:
cols = np.concatenate(cols)
except ValueError:
cols = np.asarray([], dtype=np.uint32)
return SparseMatSet(cols, rows)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
<fim_suffix>
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each term/doc pair and can just update them.
            # Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute the doc freq first and store it in the DF cache,
        # which lets the TF cache know those term freqs should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
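    # Editorial sketch (hypothetical data, kept as a comment so the class body stays valid):
    # how the masks above compose. or_query with min_should_match=2 keeps documents matching
    # at least two of the listed terms, mirroring a Solr-style `mm` constraint.
    #
    #   arr = SearchArray.index(["red fish blue fish", "one fish", "red balloon"])
    #   arr.and_query(["red", "fish"])                              # -> [True, False, False]
    #   arr.or_query(["red", "fish", "blue"], min_should_match=2)   # -> [True, False, False]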
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns) | try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
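# Editor's note: a short, hypothetical sketch of the two __getitem__ paths completed in the
# example above. An integer key yields a single Terms bag; a slice yields a SearchArray view
# that shares the positions index when avoid_copies=True (the default shown in the code).
from searcharray.postings import SearchArray

arr = SearchArray.index(["the quick brown fox", "the lazy dog"])
first = arr[0]      # Terms for doc 0, built via _row_to_postings_row
rest = arr[1:]      # sliced SearchArray backed by the same posns object
print(first.termfreq("quick"), len(rest))   # -> 1 1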
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each term/doc pair and can just update them.
            # Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
<fim_suffix>
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute the doc freq first and store it in the DF cache,
        # which lets the TF cache know those term freqs should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>try:
return self.posns.docfreq(self.term_dict.get_term_id(token)) | try:
return self.posns.docfreq(self.term_dict.get_term_id(token)) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
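# Editor's note: a hypothetical sketch of the docfreq guard completed in the example above,
# plus the BM25 scoring that consumes it. The corpus and terms are invented; the API is as shown.
from searcharray.postings import SearchArray
from searcharray.similarity import default_bm25

arr = SearchArray.index(["cat sat", "cat cat sat", "dog"])
assert arr.docfreq("cat") == 2        # appears in two documents
assert arr.docfreq("zebra") == 0      # TermMissingError is caught and reported as 0
scores = arr.score("cat", similarity=default_bm25)   # one BM25 score per document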
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/roaringish.py
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
    # lhs's highest payload bit set and rhs's lowest payload bit set (adjacent across the word boundary)
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
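# Editorial sketch: the core adjacency test used by inner_bigram_freqs above, restated on plain
# Python ints. The doc-id keys, MSB grouping and roaringish packing are deliberately omitted,
# so this is an illustration of the idea, not the library's exact encoding.
def _toy_adjacent_count(lhs_payload: int, rhs_payload: int) -> int:
    """Count positions p where lhs has bit p set and rhs has bit p + 1 set."""
    overlap = lhs_payload & (rhs_payload >> 1)
    return bin(overlap).count("1")
# Example: lhs=0b0010 (posn 1), rhs=0b0100 (posn 2) -> _toy_adjacent_count(0b0010, 0b0100) == 1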
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here it's probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
<fim_suffix>
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>try:
return self.docfreq_cache[term_id] | try:
return self.docfreq_cache[term_id] | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
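# Editor's note: the caching policy completed in the example above, restated as a standalone
# predicate for readability. This mirrors PosnBitArray._maybe_cache_docfreq and is not an
# additional API: doc freqs are only memoised for common terms in large indexes.
def _should_cache_docfreq(max_doc_id: int, docfreq: int) -> bool:
    # Same condition as _maybe_cache_docfreq in the code above.
    return max_doc_id >= 100000 and docfreq > (max_doc_id // 100)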
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/mat_set.py
def build(self):
return SparseMatSet(cols=np.asarray(self.cols, dtype=np.uint32),
rows=np.asarray(self.rows, dtype=np.uint32))
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
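# Typical construction (illustrative sketch; the dataframe and column names are hypothetical):
# df["title_indexed"] = SearchArray.index(df["title"])  # whitespace tokenization by default
# A custom callable can be passed as tokenizer=... when the text needs different analysis.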
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair and can just update them.
# Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray,
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
<fim_suffix>
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freqs first so they land in the DF cache,
# which lets the TF cache know those terms should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
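# Usage sketch (hypothetical data): arr.score("cat") returns one BM25-style score per document,
# while arr.score(["cute", "cat"]) scores the phrase "cute cat" via phrase term frequencies.
# A different Similarity callable can be passed to replace the default BM25.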
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches | try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
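# Layout assumed from the usage below (an inference, not stated explicitly here): each encoded
# uint64 packs a doc-id "key" in the upper bits, a coarse position block ("payload MSB"), and a
# bitmask of positions within that block ("payload LSB"), which is why the intersections and
# popcounts in the functions below can count positional matches directly.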
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
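# Worked example (illustrative values, looking only at one word's payload bits): if lhs has
# positions {2, 5} -> 0b100100 and rhs has positions {3, 9} -> 0b1000001000, then
# lhs & (rhs >> 1) = 0b100 with popcount 1, i.e. exactly one "lhs rhs" bigram (positions 2 and 3),
# which is what the overlap_bits computation above counts per doc.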
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
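# e.g. (illustrative) a doc whose encoded words carry position bits {1, 4, 7} contributes
# popcounts summing to a term frequency of 3 for that doc id.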
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
<fim_suffix>
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
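# Only bother caching doc freqs for relatively common terms (more than ~1% of docs) in large
# indexes (100k+ docs); rare terms are cheap to recount on demand.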
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>try:
return self.termfreq_cache[term_id] | try:
return self.termfreq_cache[term_id] | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
<fim_suffix>
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
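# A few worked examples (added for illustration; values follow the logic above):
# parse_min_should_match(3, "2") -> 2 (absolute count)
# parse_min_should_match(4, "-1") -> 3 (all but one clause)
# parse_min_should_match(5, "50%") -> 2 (percentage, truncated toward zero)
# parse_min_should_match(5, "2<75%") -> 3 (conditional: use 75% once there are more than 2 clauses)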
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
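# e.g. (illustrative) parse_field_boosts(["title^10", "body"]) -> {"title": 10.0, "body": None}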
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and a Lucene-style explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
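# Usage sketch (hypothetical dataframe and fields):
# scores, explain = edismax(df, q="cute cat", qf=["title^10", "body"], mm="2", pf=["title"])
# Every qf/pf field must be a SearchArray column; 'scores' has one entry per row of df and
# 'explain' is a Lucene-like description of what was searched.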
<fim_middle>try:
return int(value) | try:
return int(value) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
<fim_suffix>
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids) | try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/middle_out.py
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each term/doc pair that we can just update.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
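                # This array is a sliced view; term_mat.rows holds the original doc ids,
                # so the freqs returned for those ids are mapped back onto this view's
                # rows with np.isin below.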
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
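
        Examples
        --------
        An illustrative sketch (the indexed strings here are assumptions):

        >>> arr = SearchArray.index(["cat dog", "dog bird"])
        >>> arr.score("dog").shape
        (2,)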
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
<fim_suffix>
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>except TermMissingError:
return phrase_freqs | except TermMissingError:
return phrase_freqs | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
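
    Examples
    --------
    Worked examples (illustrative, derived from the parsing rules above):

    >>> parse_min_should_match(5, "2")
    2
    >>> parse_min_should_match(5, "-1")
    4
    >>> parse_min_should_match(5, "75%")
    3
    >>> parse_min_should_match(5, "3<90%")
    4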
"""
def checked_parse_int(value, error_message):
try:
return int(value)
<fim_suffix>
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
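    # Term-centric dismax: for each query term take the best (max) score across
    # fields, sum those per-term maxima, then zero out docs matching fewer terms
    # than the parsed min-should-match.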
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
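    # Field-centric: score all query terms within each field (applying mm and the
    # field boost per field), then take the max across fields as the final score.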
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
    frame : pd.DataFrame
        The dataframe with searcharray-indexed fields
    q : str
        The query string
    qf : list
        The fields to search
    mm : str, optional
        The minimum should match spec
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
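
    Examples
    --------
    A minimal sketch; the dataframe and field names are illustrative only:

    >>> chunks = pd.DataFrame({"title": SearchArray.index(["red shoes", "blue shoes"])})
    >>> scores, explain = edismax(chunks, q="red shoes", qf=["title"])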
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>except ValueError:
raise ValueError(error_message) | except ValueError:
raise ValueError(error_message) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/row_viewable_matrix.py
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
return self.copy_row_at(key)
else:
return self.slice(key)
# searcharray/searcharray/phrase/middle_out.py
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here it's probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
# searcharray/searcharray/utils/mat_set.py
def __getitem__(self, key):
# Iterate keys
beg_keys = self.rows[:-1][key]
end_keys = self.rows[1:][key]
if not isinstance(beg_keys, np.ndarray):
beg_keys = np.asarray([beg_keys])
end_keys = np.asarray([end_keys])
cols = [self.cols[beg:end] for beg, end in zip(beg_keys, end_keys)]
rows = [0] + [len(row) for row in cols]
rows = np.asarray(rows).flatten()
rows = np.cumsum(rows)
try:
cols = np.concatenate(cols)
except ValueError:
cols = np.asarray([], dtype=np.uint32)
return SparseMatSet(cols, rows)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
<fim_suffix>
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each term/doc pair that we can just update.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
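        # Fast path: the bit-parallel phrase counter handles exact adjacency
        # (slop == 1) over non-repeated tokens; everything else falls back to the
        # position-diff / scan-merge implementations below.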
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>except IndexError:
raise IndexError("index out of bounds") | except IndexError:
raise IndexError("index out of bounds") | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/roaringish.py
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
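# Rough sketch of the encoding relied on below (see RoaringishEncoder): each
# uint64 packs a doc-id key in its upper bits plus a payload whose least
# significant bits form a bitmask of term positions inside a small window, with
# the payload MSBs identifying which window of the doc the word covers.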
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
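    # Different terms: a bigram exists wherever lhs has a position bit set and rhs
    # has the bit one position higher, so AND lhs's position bits with rhs's bits
    # shifted right by one. The matched bits, shifted back left, become the
    # "continuation" positions carried forward in rhs_next2 for longer phrases.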
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
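    # Chain bigrams left to right: after each step lhs becomes the "continuation"
    # positions where the phrase-so-far ends, so only matches adjacent to those
    # keep counting; mask tracks docs whose phrase freq is still nonzero.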
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
<fim_suffix>
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq | except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
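Aside between dataset rows: the target completion above implements a compute-on-miss docfreq lookup that only caches sufficiently frequent terms. The sketch below restates that pattern in a self-contained form; the class name and threshold are hypothetical and not searcharray APIs.

from typing import Dict

class DocfreqCacheSketch:
    """Compute a term's document frequency on a cache miss; cache only frequent terms."""

    def __init__(self, counts: Dict[int, int], cache_if_at_least: int = 3):
        self.counts = counts                    # term_id -> docfreq, precomputed for the sketch
        self.cache: Dict[int, int] = {}
        self.cache_if_at_least = cache_if_at_least

    def docfreq(self, term_id: int) -> int:
        try:
            return self.cache[term_id]
        except KeyError:
            df = self.counts.get(term_id, 0)           # "compute" on miss
            if df >= self.cache_if_at_least:           # mirrors the spirit of _maybe_cache_docfreq
                self.cache[term_id] = df
            return df

cache = DocfreqCacheSketch({1: 5, 2: 1})
assert cache.docfreq(1) == 5 and 1 in cache.cache      # frequent term gets cached
assert cache.docfreq(2) == 1 and 2 not in cache.cache  # rare term does not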
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
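# --- Illustrative aside (editor's sketch, not part of the original file): __lt__ above compares
# two term-frequency bags as if they were sparse vectors over the union of their terms, in sorted
# term order. A standalone restatement (names here are local to this example):
def sparse_bag_less_than(lhs: dict, rhs: dict) -> bool:
    """Lexicographic comparison of two {term: freq} bags over the sorted union of terms."""
    for key in sorted(set(lhs) | set(rhs)):
        lhs_val, rhs_val = lhs.get(key, 0), rhs.get(key, 0)
        if lhs_val != rhs_val:
            return lhs_val < rhs_val
    return False

# sparse_bag_less_than({"a": 1}, {"a": 1, "b": 2})  -> True  (0 < 2 in the "b" slot)
# sparse_bag_less_than({"a": 2}, {"a": 1, "b": 2})  -> False (2 > 1 in the "a" slot)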
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Terms objects)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (Terms or objects convertible into Terms)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
<fim_suffix>
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, letting the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of position arrays for the given term, one per requested document."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain at least min_should_match of the terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>except TermMissingError:
return 0 | except TermMissingError:
return 0 | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
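Aside between dataset rows: the SearchArray.score path in the row above combines per-term document frequencies, term frequencies, and document lengths through a pluggable similarity. The sketch below is a generic BM25-style scorer in the common Lucene parameterization; it illustrates the shape of such a similarity and is not claimed to match searcharray's default_bm25 exactly.

import numpy as np

def bm25_sketch(tf: np.ndarray, df: float, doc_len: np.ndarray,
                avg_doc_len: float, num_docs: int, k1: float = 1.2, b: float = 0.75) -> np.ndarray:
    """Classic BM25: idf times a saturating tf term with doc-length normalization."""
    idf = np.log(1.0 + (num_docs - df + 0.5) / (df + 0.5))
    norm = k1 * (1.0 - b + b * doc_len / avg_doc_len)
    return idf * (tf * (k1 + 1.0)) / (tf + norm)

tf = np.array([3.0, 0.0, 1.0])                 # term frequency per doc (hypothetical)
doc_len = np.array([100.0, 80.0, 120.0])
scores = bm25_sketch(tf, df=2, doc_len=doc_len, avg_doc_len=100.0, num_docs=3)
# Docs with tf == 0 score 0; among matches, longer docs are normalized downward.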
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from a sorted flat array of shape 3 x num_postings.
Row 0 is the term id
Row 1 is the doc id
Row 2 is the position
Sorted by term id, then position
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into per-term encoded arrays of doc ids and positions."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
<fim_suffix>
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs | except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
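Aside between dataset rows: the target completion above falls back to _computed_term_freqs, which segments a doc-id-sorted array at the points where the doc id changes and sums each segment with np.add.reduceat. A stripped-down, self-contained version of that segmentation trick:

import numpy as np

def per_doc_sums(doc_ids: np.ndarray, counts: np.ndarray):
    """Sum `counts` per document, assuming `doc_ids` is sorted (with possible repeats)."""
    change = np.nonzero(np.diff(doc_ids))[0]
    starts = np.concatenate(([0], change + 1))       # first index of each doc's run
    return doc_ids[starts], np.add.reduceat(counts, starts)

doc_ids = np.array([0, 0, 2, 2, 2, 5])
counts = np.array([1, 2, 1, 1, 1, 4])
print(per_doc_sums(doc_ids, counts))                 # (array([0, 2, 5]), array([3, 3, 4]))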
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from a sorted flat array of shape 3 x num_postings.
Row 0 is the term id
Row 1 is the doc id
Row 2 is the position
Sorted by term id, then position
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into per-term encoded arrays of doc ids and positions."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
<fim_suffix>
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val | except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/mat_set.py
def build(self):
return SparseMatSet(cols=np.asarray(self.cols, dtype=np.uint32),
rows=np.asarray(self.rows, dtype=np.uint32))
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions is also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
<fim_suffix>
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>except TermMissingError:
return np.zeros(len(self), dtype=int) | except TermMissingError:
return np.zeros(len(self), dtype=int) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
<fim_suffix>
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>except ValueError:
raise ValueError(error_message) | except ValueError:
raise ValueError(error_message) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/term_dict.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
# searcharray/searcharray/postings.py
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
"""
import sys
class TermMissingError(KeyError):
def __init__(self, msg):
super().__init__(msg)
class TermDict:
def __init__(self):
self.term_to_ids = {}
self.id_to_terms = {}
def add_term(self, term):
if term in self.term_to_ids:
return self.term_to_ids[term]
term_id = len(self.term_to_ids)
self.term_to_ids[term] = term_id
self.id_to_terms[term_id] = term
return term_id
def copy(self):
new_dict = TermDict()
new_dict.term_to_ids = dict(self.term_to_ids)
new_dict.id_to_terms = dict(self.id_to_terms.copy())
return new_dict
def get_term_id(self, term):
try:
return self.term_to_ids[term]
<fim_suffix>
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
def compatible(self, other) -> bool:
# Intersect the terms in both dictionaries
terms_self = list(self.term_to_ids.keys())
terms_other = list(other.term_to_ids.keys())
shortest = min(len(terms_self), len(terms_other))
return terms_self[:shortest] == terms_other[:shortest]
# If the intersection is empty, the dictionaries are not compatible
def __len__(self):
return len(self.term_to_ids)
def __repr__(self):
return repr(self.term_to_ids)
@property
def nbytes(self):
bytes_used = sys.getsizeof(self.term_to_ids) + sys.getsizeof(self.id_to_terms)
return bytes_used
<fim_middle>except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.") | except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.") | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/similarity.py
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/phrase/middle_out.py
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
<fim_suffix>
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>def listify(x):
return x if isinstance(x, list) else [x] | def listify(x):
return x if isinstance(x, list) else [x] | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
<fim_suffix>
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message) | def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/language_models/embedding_model_manager.py
def get_embedding_case(self, args, function_description: FunctionDescription, kwargs, examples=None):
# example_input = f"Examples:{examples}\n" if examples else ""
content = f"Name: {function_description.name}\nArgs: {args}\nKwargs: {kwargs}"
function_hash = function_description.__hash__()
if function_hash in self.function_modeler.teacher_models_override: # check for overrides
model = self.function_modeler.teacher_models_override[function_hash][0] # take currently the first model
else:
model = DEFAULT_EMBEDDING_MODELS[DEFAULT_EMBEDDING_MODEL_NAME]
# logging
if function_hash not in self.initialized_functions:
logging.info(f"Generating function embeddings for {function_description.name} with {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
elif self.initialized_functions[function_hash] != model.model_name:
logging.info(f"Switching embeddings generation for {function_description.name} from {self.initialized_functions[function_hash]} to {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
return content, model
# tanuki_py/src/tanuki/language_models/llm_api_abc.py
def generate(self, model, system_message, prompt, **kwargs):
"""
The main generation function, given the args, kwargs, function_modeler, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset
"""
pass
# tanuki_py/src/tanuki/utils.py
def get_key(args, kwargs) -> tuple:
args_tuple = _deep_tuple(args)
kwargs_tuple = _deep_tuple(kwargs)
return args_tuple, kwargs_tuple
"""
import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.language_model_output import LanguageModelOutput
from tanuki.utils import approximate_token_count
from tanuki.validator import Validator
from tanuki.models.api_manager import APIManager
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
import logging
class LanguageModelManager(object):
"""
The LanguageModelManager is responsible for managing the language models and their outputs operationally,
this includes:
- Generating outputs from the language models
- Repairing outputs from the language models
- Saving outputs from the language models
- Finetuning the language models from the saved outputs
"""
def __init__(self,
function_modeler: FunctionModeler,
api_provider: APIManager,
generation_token_limit=512,) -> None:
self.api_provider = api_provider
self.function_modeler = function_modeler
self.default_generation_length = generation_token_limit
self.initialized_functions = {}
self.token_counts = {}
def __call__(self,
args,
function_description: FunctionDescription,
kwargs,
validator: Validator,
generation_parameters: dict) -> Any:
# add the generation length if not there
if "max_new_tokens" not in generation_parameters:
generation_parameters["max_new_tokens"] = self.default_generation_length
output = self.generate(args, kwargs, function_description, generation_parameters)
# start parsing the object, very hacky way for the time being
choice_parsed = self._parse_choice(output)
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
choice, choice_parsed, successful_repair = self.repair_output(args,
kwargs,
function_description,
output.generated_response,
validator,
generation_parameters)
if not successful_repair:
raise TypeError(
f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
output.generated_response = choice
output.distilled_model = False
datapoint = FunctionExample(args, kwargs, output.generated_response)
if output.suitable_for_finetuning and not output.distilled_model:
self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
datapoint, repaired=not valid)
instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
return instantiated
def _parse_choice(self, output):
try:
# json load
choice_parsed = json.loads(output.generated_response)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(output.generated_response)
except:
choice_parsed = output.generated_response
return choice_parsed
def generate(self, args, kwargs, function_description, llm_parameters={}):
"""
The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset
"""
func_hash = function_description.__hash__()
prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs,
function_description,
llm_parameters,
func_hash)
# logging
current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements
if current_function_setup:
generator_model = current_function_setup["model"]
if is_distilled_model:
logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model == "":
logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. Generating function outputs with {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model != model.model_name:
logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
choice = self._synthesise_answer(prompt, model, llm_parameters)
output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model)
return output
def _synthesise_answer(self, prompt, model, llm_parameters):
"""
Synthesise an answer given the prompt, model, model_type and llm_parameters
Args:
prompt (str): The prompt to send to the model
model (BaseModelConfig): The model to use for generation
llm_parameters (dict): The parameters to use for generation
return:
choice (str): The generated response
"""
system_message = model.system_message
return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters)
def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash):
"""
Get the generation case with the correct prompt and model
First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune
If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count
"""
f = str(function_description.__dict__.__repr__())
distilled_model, teacher_models = self.function_modeler.get_models(function_description)
is_distilled_model = distilled_model.model_name != ""
suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f,
distilled_model)
if func_hash not in self.initialized_functions:
# initialise the initialized_functions dict
self.initialized_functions[func_hash] = {"model": "", "examples": []}
# no examples needed, using a finetuned model. Don't save to the finetune dataset
if is_distilled_model and suitable_for_distillation:
prompt = self.construct_prompt(f, args, kwargs, [], distilled_model)
return prompt, distilled_model, suitable_for_distillation, True
else:
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16)
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
# update the examples in the initialized_functions dict
self.initialized_functions[func_hash]["examples"] = examples
examples_token_count = sum([approximate_token_count(example) for example in examples])
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(teacher_models,
examples_token_count + input_prompt_token_count + generation_tokens,
len(examples))
if model:
examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in
aligns]
prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model)
return prompt, model, suitable_for_distillation, False
else:
raise ValueError(
"The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig):
<fim_suffix>
# check if finetunable
finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
input_prompt_token_count = approximate_token_count(finetuning_prompt)
if distilled_model.system_message_token_count < 0:
distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message)
if distilled_model.instruction_token_count < 0:
distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions)
suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length
return suitable_for_finetune, input_prompt_token_count
def construct_prompt(self, f, args, kwargs, examples, model):
"""
Construct a prompt given the model, function description, args, kwargs and examples
Args:
model (BaseModelConfig): The model to use for generation
f (str): The function description
args (tuple): The args of the function
kwargs (tuple): The kwargs of the function
examples (list): The examples of the function
Returns:
content (str): The prompt to send to the model
"""
if examples:
final_examples = "\n".join(
[f"{align}" for align in
examples])
example_input = f"Examples:{final_examples}\n"
else:
example_input = ""
instruction_prompt = model.instructions
content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
return content
def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters):
"""
Repair the output given the input, function description, failed outputs list, examples and models
"""
# get the token counts
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
examples_token_count = sum([approximate_token_count(example) for example in examples])
failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list])
input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:")
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(models,
examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count,
len(examples))
if model:
prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model)
logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}")
choice = self._synthesise_answer(prompt, model, llm_parameters)
return choice
else:
return None
def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model):
"""
Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples
"""
if examples:
final_examples = "\n".join(
[f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in
examples])
successful_examples = f"Examples:{final_examples}\n"
else:
successful_examples = ""
failed_examples = ""
for failed_output in failed_outputs_list:
failed_examples += f"Output: {failed_output[0]}\nError: {failed_output[1]}\n\n"
end_token_addition = ""
if model.parsing_helper_tokens["end_token"]:
end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output."
prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:"
return prompt
def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0):
"""
Choose a model from the models given the token count and number of examples
Args:
models (list): The models to choose from
input_token_count (int): The token count of the input
nr_of_examples (int): The number of examples
Returns:
model (BaseModelConfig): The chosen model
"""
for model in models:
# check if input token count is less than the context length
# If the model config has custom messages, then use those, otherwise use the default ones
if model.system_message_token_count < 0:
model.system_message_token_count = approximate_token_count(model.system_message)
if model.instruction_token_count < 0:
model.instruction_token_count = approximate_token_count(model.instructions)
if model.parsing_helper_tokens["start_token"]:
input_token_count += 2*nr_of_examples
if model.parsing_helper_tokens["end_token"]:
input_token_count += 2*nr_of_examples
total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count
if total_token_count < model.context_length:
return model
return None
def repair_output(self,
args: tuple,
kwargs: dict,
function_description: FunctionDescription,
choice,
validator: Validator,
generation_parameters: dict) -> tuple:
"""
Repair an output, that failed type validation by generating a new output using the teacher model and the error
Args:
args (tuple): The args of the function
kwargs (dict): The kwargs of the function
function_description (FunctionDescription): The function description
choice: The output that failed type validation, type is arbitrary
validator (Validator): The validator object
Returns:
choice (str): The choice that was generated by the language model
choice_parsed: The parsed choice, type is arbitrary
valid (bool): Whether the output was successfully repaired
"""
# get the teacher models
teacher_models = self.function_modeler.get_models(function_description)[1]
valid = False
retry_index = 5
f = str(function_description.__dict__.__repr__() + "\n")
error = f"Output type was not valid. Expected an valid object of type {function_description.output_type_hint}, got '{choice}'"
# instantiate the failed outputs list
failed_outputs_list = [(choice, error)]
while retry_index > 0 and not valid:
# get the alignments
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5)
# Generate the repaired LLM output
choice = self.repair_generate(args,
kwargs,
f,
failed_outputs_list,
aligns,
teacher_models,
generation_parameters)
if not choice:
# if no choice then the input was too long for the model
# no specific error but the retry index goes down
retry_index -= 1
continue
# start parsing the object
try:
# json load
choice_parsed = json.loads(choice)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(choice)
except:
choice_parsed = choice
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
# if it's not valid, add it to the failed outputs list
error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'"
failed_outputs_list.append((choice, error))
retry_index -= 1
if valid:
logging.info(f"Successfully repaired output.")
return choice, choice_parsed, valid
<fim_middle>"""
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
""" | """
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
<fim_suffix>
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
""" | """
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/validator.py
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
# tanuki_py/src/tanuki/function_modeler.py
def check_for_finetuning(self, function_description, func_hash):
"""
Check for finetuning status
If already finetuning, check for finetuning status
If not finetuning, check for finetuning condition and execute finetuning if condition is met
"""
try:
# check if already finetuning
if "job_id" in self.function_configs[func_hash].current_training_run:
# check for job status
self._check_finetuning_status(func_hash, function_description)
else:
# check for finetuning condition
if self._check_finetuning_condition(func_hash, function_description):
self._execute_finetuning(function_description, func_hash)
except Exception as e:
print(e)
print("Error checking for finetuning")
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
"""
import dataclasses
import datetime
import inspect
import json
import typing
from typing import get_args, Literal
import string
import types
def json_default(thing):
try:
return dataclasses.asdict(thing)
except TypeError:
pass
if isinstance(thing, datetime.datetime):
return thing.isoformat(timespec='microseconds')
if isinstance(thing, type):
return thing.__name__
#if hasattr(typing, "_GenericAlias") and isinstance(thing, typing._GenericAlias):
if hasattr(typing, "_UnionGenericAlias"):
if isinstance(thing, typing._UnionGenericAlias):
return {
"Union": [json_default(arg) for arg in get_args(thing)]
}
if thing == Literal[...]:
return {
"Literal": thing.__args__
}
if isinstance(thing, type(None)):
return "None"
if isinstance(thing, typing._SpecialForm):
return thing._name
if isinstance(thing, typing._GenericAlias) or isinstance(thing, types.GenericAlias):
return {
"GenericAlias": [json_default(arg) for arg in get_args(thing)]
}
if isinstance(thing, str):
return thing
if isinstance(thing, list) or isinstance(thing, tuple) or isinstance(thing, set):
return [json_default(item) for item in thing]
if isinstance(thing, dict):
return {json_default(key): json_default(value) for key, value in thing.items()}
raise TypeError(f"object of type {type(thing).__name__} not serializable")
def json_dumps(thing):
return json.dumps(
thing,
default=json_default,
ensure_ascii=False,
sort_keys=True,
indent=None,
separators=(',', ':'),
)
def get_model(content, logger, func_hash):
"""
Get the model from the content and the logger.
Decide on the model depending on the length of the content. If it is finetunable, return (model, True); otherwise return (model, False)
Args:
content (str): the content to be aligned
logger (buffered logger): the logger
func_hash (str): the function hash
Returns:
model (str): the model to be used
finetunable (bool): whether the model is finetunable
"""
num_tokens = approximate_token_count(content)
finetune_limit = logger.finetune_token_limit
finetune_model, teacher_models = logger.get_models(func_hash)
if num_tokens < finetune_limit:
return finetune_model, True
else:
# this is just for backwards compatibility currently
if len(teacher_models) == 0 or isinstance(teacher_models[0], str):
teacher_models = [("gpt-4", 7000),("gpt-4-32k", 31000)]
for model, token_limit in teacher_models:
if num_tokens < token_limit:
return model, False
raise ValueError("The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def approximate_token_count(content):
"""
Approximate the token count of input
Number of tokens is word tokens (nr of words * 1.33) + nr of special characters (which are usually their own tokens)
Args:
content (str, bytes): the content to be approximated
Returns:
number_of_tokens (int): the number of tokens
"""
common_special_characters = r"\/(){}[]<>|`~@#$%^&*+=-_:;\""
# check if input type is string
if isinstance(content, str):
number_of_word_tokens = int(len(content.split(" "))*1.333)
nr_of_special_characters = sum([content.count(char) for char in common_special_characters])
return number_of_word_tokens + nr_of_special_characters
# check if input is a byte string
if isinstance(content, bytes):
number_of_word_tokens = int(len(content.split(b" "))*1.333)
nr_of_special_characters = sum([content.count(char.encode("utf-8")) for char in common_special_characters])
return number_of_word_tokens + nr_of_special_characters
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
def get_key(args, kwargs) -> tuple:
args_tuple = _deep_tuple(args)
kwargs_tuple = _deep_tuple(kwargs)
return args_tuple, kwargs_tuple
def prepare_object_for_saving(input_object):
<fim_suffix>
# check if list
if isinstance(input_object, list):
return [prepare_object_for_saving(item) for item in input_object]
# check if tuple
elif isinstance(input_object, tuple):
return tuple([prepare_object_for_saving(item) for item in input_object])
# check if dict
elif isinstance(input_object, dict):
return {key: prepare_object_for_saving(value) for key, value in input_object.items()}
# check if pydantic object
if hasattr(input_object, "__dict__"):
attributes = input_object.__dict__
for key, value in attributes.items():
attributes[key] = prepare_object_for_saving(value)
return attributes
#
# check if datetime for custom logic
elif isinstance(input_object, datetime.datetime) or isinstance(input_object, datetime.date) or isinstance(input_object, datetime.time):
attrs = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', 'tzinfo']
attributes = {attr: getattr(input_object, attr, None) for attr in attrs if getattr(input_object, attr, None) is not None}
return attributes
return input_object
def encode_int(n):
# Define the character set for encoding
charset = string.ascii_lowercase + string.digits + "_"
return charset[n]
def decode_int(s):
# Define the character set for encoding
charset = string.ascii_lowercase + string.digits + "_"
return charset.index(s)
def _get_source_ipython(func) -> str:
"""
Get the source code of a function from IPython (to support Colab and Jupyter notebooks)
:param func: The function to get the source code from
:return: The source code of the function
"""
# Get the IPython instance
from IPython import get_ipython
ipython = get_ipython()
# Get the input history
input_cells = ipython.history_manager.input_hist_parsed
class_name = func.__name__
source_code = None
for cell in input_cells:
if f"class {class_name}" in cell:
source_code = cell
break
# If found, return the source code
return source_code
def get_source(func) -> str:
"""
Get the source code of a function
Args:
func (function): the function to get the source code from
Returns:
source (str): the source code of the function
"""
try:
return inspect.getsource(func)
except Exception:
return _get_source_ipython(func)<fim_middle>"""
Get a dictionary representation of the object
""" | """
Get a dictionary representation of the object
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/function_modeler.py
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
"""
import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
def log_symbolic_patch(self, func_hash, example):
<fim_suffix>
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
try:
self.ensure_persistence_location_exists()
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray()
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB
written_datapoints = {}
try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter()
except Exception as e:
pass
return written_datapoints
return {}
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the message and log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle>"""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
""" | """
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]:
"""
Get the size of the dataset for a function hash
"""
log_directory = self._get_log_directory()
dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION,
"positive": POSITIVE_FILE_EXTENSION,
"negative": NEGATIVE_FILE_EXTENSION,
"patches": PATCH_FILE_EXTENSION}
log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type])
if not os.path.exists(log_file_path):
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
try:
with open(log_file_path, "rb") as f:
dataset = f.read()
dataset_string = repr(dataset)
dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n")
if return_type == "both":
return dataset_length, dataset
elif return_type == "dataset":
return dataset
elif return_type == "length":
return dataset_length
except Exception as e:
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def read(self, path: str) -> str:
"""
Read data from a file
"""
with open(path, "r") as f:
return f.read()
"""
import os
from bitarray._bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilterFileSystemDriver(IBloomFilterPersistence):
"""
This is a Filesystem implementation of a Bloom Filter persistence layer.
"""
def __init__(self, log_directory: str):
self.log_directory = log_directory
def save(self, bit_array: bitarray) -> None:
"""
Write a bloom filter array of bits to the local filesystem.
:param bit_array: The bloom filter's array of bits, which tracks unique function invocations
"""
bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin')
# Append 0 bits to make the length a multiple of 8
while len(bit_array) % 8 != 0:
bit_array.append(0)
with open(bloom_filter_path, 'wb') as f:
f.write(bit_array.tobytes())
def load(self) -> bitarray:
<fim_suffix>
bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin')
with open(bloom_filter_path, 'rb') as f:
bit_array = bitarray()
bit_array.frombytes(f.read())
while len(bit_array) % 8 != 0:
bit_array.append(0)
return bit_array<fim_middle>"""
Load a bloom filter from the local filesystem.
:return: A bloom filter object containing the state of unique function invocations
""" | """
Load a bloom filter from the local filesystem.
:return: A bloom filter object containing the state of unique function invocations
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/models/function_config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/function_modeler.py
def _update_finetune_config(self, response: FinetuneJob, func_hash, function_description):
"""
Update the config file to reflect the new model and switch the current model to the finetuned model
"""
self.function_configs[func_hash].update_with_finetuned_response(response)
logging.info(f"Finetuning for {function_description.name} using {self.function_configs[func_hash].distilled_model.provider} finished with status: {response.status}."\
f" The id of the finetuned model is {response.fine_tuned_model.model_name}")
try:
self._update_config_file(func_hash)
except Exception as e:
logging.info(f"Could not update the function configuration file with the finetuned model for {function_description.name}. Error: {e}")
pass
# tanuki_py/src/tanuki/function_modeler.py
def get_models(self, function_description):
"""
Return the current model from the config file
"""
func_hash = function_description.__hash__()
if func_hash in self.function_configs:
func_config = self.function_configs[func_hash]
else:
func_config = self.load_function_config(func_hash, function_description)
return func_config.distilled_model, func_config.teacher_models
"""
from pydantic import BaseModel
from typing import Dict, List
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.constants import DEFAULT_TEACHER_MODEL_NAMES, DEFAULT_DISTILLED_MODEL_NAME, \
DISTILLED_MODEL, TEACHER_MODEL
from tanuki.language_models.llm_configs.model_config_factory import ModelConfigFactory
config_factory = ModelConfigFactory()
class FunctionConfig(BaseModel):
"""
The function config to execute the inference for the function and distillation.
Parameters
----------
distilled_model : BaseModelConfig -- the distilled model config
current_model_stats : Dict -- the current model stats
last_training_run : Dict -- the last training run
current_training_run : Dict -- the current training run
teacher_models : List[BaseModelConfig] -- the teacher models
nr_of_training_runs : int -- the number of training runs
"""
distilled_model: BaseModelConfig = DEFAULT_STUDENT_MODELS[DEFAULT_DISTILLED_MODEL_NAME]
current_model_stats : Dict = {
"trained_on_datapoints": 0,
"running_faults": []}
last_training_run : Dict = {"trained_on_datapoints": 0}
current_training_run : Dict = {}
teacher_models : List[BaseModelConfig] = [DEFAULT_TEACHER_MODELS[teacher_model_name] for teacher_model_name in DEFAULT_TEACHER_MODEL_NAMES]
nr_of_training_runs : int = 0
def load_from_dict(self, json_dict):
"""
Load the function config from a dict
Args:
json_dict: The dict to load the function config from
Returns:
The function config
"""
self.distilled_model = config_factory.create_config(json_dict["distilled_model"], DISTILLED_MODEL)
self.current_model_stats = json_dict["current_model_stats"]
self.last_training_run = json_dict["last_training_run"]
self.current_training_run = json_dict["current_training_run"]
self.nr_of_training_runs = json_dict["nr_of_training_runs"]
if "teacher_models" in json_dict and len(json_dict["teacher_models"]) > 0:
self.teacher_models = [config_factory.create_config(teacher_model, TEACHER_MODEL) for teacher_model in json_dict["teacher_models"]]
return self
def to_dict(self):
"""
Convert the function config to a dict
Returns:
The dict
"""
try:
config_dictionary = self.model_dump()
except AttributeError as e:
config_dictionary = self.dict()
return config_dictionary
def update_with_finetuned_response(self, response):
<fim_suffix>
if response.status == "failed":
self.current_training_run = {}
else:
self.distilled_model = response.fine_tuned_model
self.last_training_run = self.current_training_run
self.current_model_stats = {
"trained_on_datapoints": self.current_training_run[
"trained_on_datapoints"],
"running_faults": []}
self.nr_of_training_runs += 1
self.current_training_run = {}
<fim_middle>"""
Update the function config with the finetuned response
Args:
response: The finetuned response
""" | """
Update the function config with the finetuned response
Args:
response: The finetuned response
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def does_object_exist(self, path: str) -> bool:
"""
Check to see if a path exists on the filesystem.
:param path:
:return:
"""
return os.path.exists(path)
# tanuki_py/src/tanuki/static_assertion_visitor.py
def process_assert_helper_lr(self, left, right, iter_name=None, op=None):
input_args, input_kwargs = self.extract_args(left, iter_name)
if isinstance(op, In):
output = Or(self.extract_output(right))
else:
output = self.extract_output(right)
key = get_key(input_args, input_kwargs)
self.mocks[key] = output
# tanuki_py/src/tanuki/language_models/aws_bedrock_api.py
def __init__(self) -> None:
# initialise the abstract base class
super().__init__()
self.bedrock_runtime = None
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
<fim_suffix>
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are of the correct type
# this is an additional check, because the above only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>"""Validate base types.""" | """Validate base types.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/models/function_config.py
def update_with_finetuned_response(self, response):
"""
Update the function config with the finetuned response
Args:
response: The finetuned response
"""
if response.status == "failed":
self.current_training_run = {}
else:
self.distilled_model = response.fine_tuned_model
self.last_training_run = self.current_training_run
self.current_model_stats = {
"trained_on_datapoints": self.current_training_run[
"trained_on_datapoints"],
"running_faults": []}
self.nr_of_training_runs += 1
self.current_training_run = {}
# tanuki_py/src/tanuki/models/api_manager.py
def __getitem__(self,
provider: str) -> Any:
if provider not in self.api_providers:
self.add_api_provider(provider)
return self.api_providers[provider]
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
"""
import ast
import datetime
import io
import json
from typing import List, Tuple, Dict, Union
import logging
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMENTS, OPENAI_PROVIDER
from tanuki.models.function_type import FunctionType
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_EMBEDDING_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.finetune_job import FinetuneJob
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.utils import approximate_token_count, prepare_object_for_saving, encode_int, decode_int
import copy
from tanuki.models.function_config import FunctionConfig
from tanuki.models.api_manager import APIManager
class FunctionModeler(object):
"""
This class manages the registered function models and their datasets
comprised of symbolic and embeddable alignments, and symbolic and embeddable patches
"""
def __init__(self, data_worker: DatasetWorker,
api_provider: APIManager,
environment_id=0,
) -> None:
self.function_configs = {}
self.data_worker = data_worker
self.distillation_token_limit = 3000 # the token limit for finetuning
self.symbolic_align_buffer = {}
self.embeddable_align_buffer = {}
self._get_datasets()
self.environment_id = environment_id
self.check_finetune_blacklist = []
self.execute_finetune_blacklist = []
self.store_data_blacklist = []
self.api_provider = api_provider
self.teacher_models_override = {}
self.student_model_override = {}
self.startup_logging_checker = {}
def _get_dataset_info(self, dataset_type, func_hash, type="length"):
"""
Get the dataset size for a function hash
"""
return self.data_worker.load_dataset(dataset_type, func_hash, return_type=type)
def _configure_function_models(self, teacher_models: List[Union[str, BaseModelConfig]],
student_model: str,
func_hash: str,
task_type: str):
"""
Configure the function models
"""
if teacher_models:
self._configure_teacher_models(teacher_models, func_hash, task_type)
if student_model:
self._configure_student_model(student_model, func_hash, task_type)
if teacher_models and not student_model:
for model_config in self.teacher_models_override[func_hash]:
# ban all non-openai models from finetuning if the teacher is not openai and the student is not specified, because it doesn't make sense
if model_config.provider != OPENAI_PROVIDER and func_hash not in self.check_finetune_blacklist:
self.check_finetune_blacklist.append(func_hash)
if model_config.provider != OPENAI_PROVIDER and func_hash not in self.execute_finetune_blacklist:
self.execute_finetune_blacklist.append(func_hash)
def _configure_teacher_models(self,
teacher_models: List[Union[str, BaseModelConfig]],
func_hash: str,
task_type: str):
"""
Add custom teacher models to the function config
First this is added to the teacher_models_override dict, which is used to override the teacher models
Args:
teacher_models: A list of teacher models to use for the function hash
func_hash: The function hash to add the teacher models to
"""
if func_hash not in self.teacher_models_override:
self.teacher_models_override[func_hash] = []
if task_type == FunctionType.EMBEDDABLE:
preconfigured_models = DEFAULT_EMBEDDING_MODELS
elif task_type == FunctionType.SYMBOLIC:
preconfigured_models = DEFAULT_TEACHER_MODELS
for model in teacher_models:
if isinstance(model, str):
if model not in preconfigured_models:
raise Exception(f"Teacher model {model} not supported by default. Please include it in the list in extended config format")
model_config = preconfigured_models[model]
elif isinstance(model, BaseModelConfig):
model_config = model
self.teacher_models_override[func_hash].append(model_config)
def _configure_student_model(self,
student_model: str,
func_hash: str,
task_type: str):
"""
Add a custom student model to the function config
This is stored in the student_model_override dict, which is used to override the default student model
Args:
student_model: The student model to use for the function hash
func_hash: The function hash to add the student model to
"""
if task_type == FunctionType.EMBEDDABLE:
logging.info("Embeddable function type does not support student models")
preconfigured_models = DEFAULT_STUDENT_MODELS
if student_model not in preconfigured_models:
raise Exception(f"Student model {student_model} is currently not supported.")
model_config = preconfigured_models[student_model]
self.student_model_override[func_hash] = model_config
def _get_datasets(self):
"""
Get the existing datasets from the data worker
"""
self.dataset_sizes = self.data_worker.load_existing_datasets()
def save_embeddable_align_statements(self,
function_hash: str,
args,
kwargs,
positive_pairs: List[Tuple[List, Dict]],
negative_pairs: List[Tuple[List, Dict]]):
"""
Save the contrastive align statements for the embeddable function.
Do not save if the function hash is in the store data blacklist
Args:
function_hash: A unique hash for the function
args: The arguments of the function
kwargs: The keyword arguments of the function
positive_pairs: A list of other function invocations that should have equivalent embeddings
negative_pairs: A list of other function invocations that should have different embeddings
"""
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
# prepare positive pairs for saving
parsed_positive_pairs = []
for pair in positive_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_positive_pairs.append(parsed_pair)
# prepare negative pairs for saving
parsed_negative_pairs = []
for pair in negative_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_negative_pairs.append(parsed_pair)
# save the contrastive pairs
for pair in parsed_positive_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=True)
for pair in parsed_negative_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=False)
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_datapoint(self, func_hash, example):
"""
Save datapoint to the training data
"""
written_datapoints = self.data_worker.log_symbolic_patch(func_hash, example)
for func_hash, datapoints in written_datapoints.items():
if func_hash in self.dataset_sizes[PATCHES]:
# if the dataset size is -1, it means we haven't read in the dataset size yet
if self.dataset_sizes[PATCHES][func_hash] == -1:
self.dataset_sizes[PATCHES][func_hash] = self._get_dataset_info(PATCHES, func_hash, type="length")
else:
self.dataset_sizes[PATCHES][func_hash] += datapoints
else:
self.dataset_sizes[PATCHES][func_hash] = datapoints
return len(written_datapoints) > 0
def get_symbolic_alignments(self, func_hash, max=20):
"""
Get all symbolic aligns for a function hash
"""
if func_hash not in self.symbolic_align_buffer:
return []
buffer = self.symbolic_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def get_embeddable_alignments(self, func_hash, max=20):
"""
Get all embeddable aligns for a function hash
"""
if func_hash not in self.embeddable_align_buffer:
return []
buffer = self.embeddable_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def _get_examples_from_alignment_buffer(self, buffer, max=20):
"""
Get examples from a buffer
"""
split_buffer = bytes(buffer).split(b"\n")
# the buffer is a byte array of stringified python dicts; they are parsed into dict objects below
example_set = set()
for example in split_buffer:
if example == b"":
continue
example_set.add(example)
# easy and straightforward way to get the nr of words (not perfect, but it doesn't need to be)
# Can do the proper way of tokenizing later; it might be slower and we don't need 100% accuracy
example_element_limit = EXAMPLE_ELEMENT_LIMIT
examples = []
for example_bytes in split_buffer:
if example_bytes in example_set:
nr_of_elements = approximate_token_count(example_bytes)
example_element_limit -= nr_of_elements
if example_element_limit < 0:
break
example = example_bytes.decode('utf-8')
# json load the example
try:
example = json.loads(example)
except:
example = ast.literal_eval(example)
examples.append(example)
example_set.remove(example_bytes)
return list(examples)[:max]
def load_symbolic_align_statements(self, function_hash):
"""
Load all align statements
First check the data storage blacklist,
if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray
"""
if function_hash in self.store_data_blacklist:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0
self.symbolic_align_buffer[function_hash] = bytearray()
elif function_hash not in self.symbolic_align_buffer:
dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both")
if align_dataset:
self.symbolic_align_buffer[function_hash] = bytearray(align_dataset)
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size
def postprocess_symbolic_datapoint(self, func_hash, function_description, example, repaired=True):
"""
Postprocess the datapoint
First check if the datapoint should be added to the training data
Add the datapoint if it should be added
Then check if the function should be finetuned and execute finetuning if it should
"""
try:
if func_hash not in self.store_data_blacklist:
added = self.save_symbolic_datapoint(func_hash, example)
if added:
self._update_datapoint_config(repaired, func_hash)
except Exception as e:
print(e)
print("Could not add datapoint to training data")
if func_hash not in self.execute_finetune_blacklist:
self.check_for_finetuning(function_description, func_hash)
def load_function_config(self, func_hash, function_description):
"""
Load the config file for a function hash
"""
config, default = self.data_worker.load_function_config(func_hash)
if func_hash in self.student_model_override and config.distilled_model.model_name == "":
config.distilled_model = self.student_model_override[func_hash]
if default and func_hash not in self.check_finetune_blacklist:
finetuned, finetune_config = self._check_for_finetunes(function_description, config.distilled_model)
if finetuned:
config = finetune_config
# update teachers if not default
if func_hash in self.teacher_models_override:
config.teacher_models = self.teacher_models_override[func_hash]
self.function_configs[func_hash] = config
return config
def _check_for_finetunes(self, function_description: FunctionDescription, model_config : BaseModelConfig) -> Tuple[bool, Dict]:
# hash the function_hash into 16 characters (to embed it into the name of OpenAI finetunes, for later retrieval)
logging.info(f"Checking for finetunes for {function_description.name} using {model_config.provider}")
finetune_hash = function_description.__hash__(purpose="finetune") + encode_int(self.environment_id)
# List up to 1000 fine-tuning jobs
finetunes: List[FinetuneJob] = self.api_provider[model_config.provider].list_finetuned(model_config, limit=1000)
# Check if the function_hash is in the fine-tuning jobs
# the finetunes are in chronological order starting from newest
# So this gets the latest finetune
for finetune in finetunes:
# check if the finetune hash is in the fine-tuned model name
if finetune.status == "succeeded" and finetune_hash in finetune.fine_tuned_model.model_name:
try:
config = self._construct_config_from_finetune(finetune_hash, finetune)
# save the config
self.data_worker.update_function_config(function_description.__hash__(), config)
logging.info(f"Found finetuned model for {function_description.name} [{config.distilled_model.model_name}]")
return True, config
except:
logging.info(f"Found finetuned model for {function_description.name} [{finetune.fine_tuned_model.model_name}] but could not load it")
return False, {}
logging.info(f"No finetuned model found for {function_description.name}")
return False, {}
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
def get_models(self, function_description):
<fim_suffix>
func_hash = function_description.__hash__()
if func_hash in self.function_configs:
func_config = self.function_configs[func_hash]
else:
func_config = self.load_function_config(func_hash, function_description)
return func_config.distilled_model, func_config.teacher_models
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
Then updates running faults depending if priority is True or not and takes last 100
Then checks the revert condition, i.e if last 10 datapoints are 50% faulty
Finally updates the config file
Args:
repaired (bool): whether the datapoint was repaired by the teacher model and should be added to the training data
"""
try:
if repaired:
self.function_configs[func_hash].current_model_stats["running_faults"].append(1)
else:
self.function_configs[func_hash].current_model_stats["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash].current_model_stats["running_faults"] = \
self.function_configs[func_hash].current_model_stats["running_faults"][-100:]
# check if the last 10 datapoints are 50% faulty, this is the switch condition
if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash].distilled_model.model_name = ""
self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0
self.function_configs[func_hash].current_model_stats["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
def _update_config_file(self, func_hash):
self.data_worker.update_function_config(func_hash, self.function_configs[func_hash])
def check_for_finetuning(self, function_description, func_hash):
"""
Check for finetuning status
If already finetuning, check for finetuning status
If not finetuning, check for finetuning condition and execute finetuning if condition is met
"""
try:
# check if already finetuning
if "job_id" in self.function_configs[func_hash].current_training_run:
# check for job status
self._check_finetuning_status(func_hash, function_description)
else:
# check for finetuning condition
if self._check_finetuning_condition(func_hash, function_description):
self._execute_finetuning(function_description, func_hash)
except Exception as e:
print(e)
print("Error checking for finetuning")
def _check_finetuning_condition(self, func_hash, function_description):
"""
Check if the finetuning condition is met
Currently finetuning condition is dependent on the number of symbolic datapoints since last finetuning
"""
if func_hash not in self.function_configs:
return False
training_threshold = (2 ** self.function_configs[func_hash].nr_of_training_runs) * 200
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
if patch_dataset_size == -1:
# if we haven't read in the patch dataset size yet, read it in
patch_dataset_size = self._get_dataset_info(PATCHES, func_hash, type="length")
self.dataset_sizes[PATCHES][func_hash] = patch_dataset_size
if func_hash not in self.startup_logging_checker:
logging.info(f"Function {function_description.name} [{align_dataset_size} aligns | {patch_dataset_size} runs] will be finetuned from"\
f" {self.function_configs[func_hash].teacher_models[0].model_name} using {self.function_configs[func_hash].distilled_model.provider} in "\
f"{training_threshold-(patch_dataset_size + align_dataset_size)} runs")
self.startup_logging_checker[func_hash] = True
return (patch_dataset_size + align_dataset_size) > training_threshold
def _execute_finetuning(self, function_description, func_hash):
"""
Execute the finetuning
First create the OpenAI compatible dataset with jsonL file and upload it
Then submit the OpenAI finetuning job
Finally update the config file to reflect the new finetuning job as current
"""
# get function description
function_string = str(function_description.__dict__.__repr__() + "\n")
# get the align dataset
align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, func_hash, type="dataset")
if not align_dataset:
align_dataset = ""
else:
align_dataset = align_dataset.decode('utf-8')
# get the patch dataset
patch_dataset = self._get_dataset_info(PATCHES, func_hash, type="dataset")
if not patch_dataset:
patch_dataset = ""
else:
patch_dataset = patch_dataset.decode('utf-8')
if align_dataset == "" and patch_dataset == "":
return
dataset = align_dataset + patch_dataset
dataset.replace("\\n", "[SEP_TOKEN]")
dataset = dataset.split("\n")
dataset = [x.replace("[SEP_TOKEN]", "\\n") for x in dataset if x != ""]
# read in the dataset file
dataset = [ast.literal_eval(x) for x in dataset]
#
# create the openai dataset
instruction = "You are given below a function description and input data. The function description of what the function must carry out can be found in the Function section, with input and output type hints. The input data can be found in Input section. Using the function description, apply the function to the Input and return a valid output type, that is acceptable by the output_class_definition and output_class_hint. Return None if you can't apply the function to the input or if the output is optional and the correct output is None.\nINCREDIBLY IMPORTANT: Only output a JSON-compatible string in the correct response format."
finetuning_dataset = [{"messages": [
{
"role": "system",
"content": f"You are a skillful and accurate language model, who applies a described function on input data. Make sure the function is applied accurately and correctly and the outputs follow the output type hints and are valid outputs given the output types."
},
{"role": "user",
"content": f"{instruction}\nFunction: {function_string}---\nInputs:\nArgs: {x['args']}\nKwargs: {x['kwargs']}\nOutput:"},
{"role": "assistant", "content": str(x['output']) if x['output'] is not None else "None"}]}
for x in dataset]
# Create an in-memory text stream
temp_file = io.BytesIO()
# Write data to the stream
for idx, item in enumerate(finetuning_dataset):
temp_file.write(json.dumps(item).encode('utf-8'))
if idx != len(finetuning_dataset) - 1:
temp_file.write("\n".encode('utf-8'))
# Reset the stream position to the beginning
temp_file.seek(0)
# create the finetune hash
finetune_hash = function_description.__hash__(purpose="finetune")
nr_of_training_runs = self.function_configs[func_hash].nr_of_training_runs
finetune_hash += encode_int(self.environment_id)
finetune_hash += encode_int(nr_of_training_runs)
# here we can be sure the datasets were read in, as that is checked in the finetuning condition check
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
total_dataset_size = align_dataset_size + patch_dataset_size
# Use the stream as a file
try:
finetune_provider = self.function_configs[func_hash].distilled_model.provider
logging.info(f"Starting finetuning for {function_description.name} using {finetune_provider} for {self.function_configs[func_hash].distilled_model.base_model_for_sft}")
finetuning_response: FinetuneJob = self.api_provider[finetune_provider].finetune(file=temp_file,
suffix=finetune_hash,
model_config = self.function_configs[func_hash].distilled_model,)
except Exception as e:
logging.info(f"Could not start finetuning for {function_description.name} using {finetune_provider}. Error: {e}")
return
self.function_configs[func_hash].current_training_run = {"job_id": finetuning_response.id,
"trained_on_datapoints": total_dataset_size,
"last_checked": datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")}
# update the config json file
try:
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file to register a finetuning run")
def _check_finetuning_status(self, func_hash, function_description):
"""
Check the status of the current finetuning job
If the job is finished, update the config file to reflect the new model
"""
job_id = self.function_configs[func_hash].current_training_run["job_id"]
last_checked = self.function_configs[func_hash].current_training_run["last_checked"]
# check if last checked was more than 30 mins ago
if (datetime.datetime.now() - datetime.datetime.strptime(last_checked,
"%Y-%m-%d %H:%M:%S")).total_seconds() > 1800:
finetune_provider = self.function_configs[func_hash].distilled_model.provider
response = self.api_provider[finetune_provider].get_finetuned(job_id, model_config = self.function_configs[func_hash].distilled_model)
self.function_configs[func_hash].current_training_run["last_checked"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
if response.status == "succeeded" or response.status == "failed":
self._update_finetune_config(response, func_hash, function_description)
else:
self._update_config_file(func_hash)
def _update_finetune_config(self, response: FinetuneJob, func_hash, function_description):
"""
Update the config file to reflect the new model and switch the current model to the finetuned model
"""
self.function_configs[func_hash].update_with_finetuned_response(response)
logging.info(f"Finetuning for {function_description.name} using {self.function_configs[func_hash].distilled_model.provider} finished with status: {response.status}."\
f" The id of the finetuned model is {response.fine_tuned_model.model_name}")
try:
self._update_config_file(func_hash)
except Exception as e:
logging.info(f"Could not update the function configuration file with the finetuned model for {function_description.name}. Error: {e}")
pass
<fim_middle>"""
Return the current model from the config file
""" | """
Return the current model from the config file
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
"""
import os
from enum import Enum
from typing import Literal, Union, Optional, Dict
from appdirs import user_data_dir
from tanuki.constants import *
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.persistence.filter.filesystem_bloom import BloomFilterFileSystemDriver
from tanuki.trackers.abc_buffered_logger import ABCBufferedLogger
class FilesystemBufferedLogger(ABCBufferedLogger):
"""
A class that handles the reading and writing of patch invocations and align statements.
It includes the logic for a bloom filter, to ensure that we only store unique invocations.
"""
def __init__(self, name, level=15):
self.log_directory = self._get_log_directory()
super().__init__(name, level)
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
<fim_suffix>
return BloomFilterFileSystemDriver(log_directory=self.log_directory)
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
def ensure_persistence_location_exists(self) -> None:
"""
Ensure that the location on the filesystem we will be writing to actually exists. If not, create it.
"""
log_directory = self.log_directory
# Create the folder if it doesn't exist
if not os.path.exists(log_directory):
os.makedirs(log_directory)
def does_object_exist(self, path: str) -> bool:
"""
Check to see if a path exists on the filesystem.
:param path:
:return:
"""
return os.path.exists(path)
def _get_log_directory(self) -> str:
"""
Find a location on the filesystem to write our logs to.
:return:
"""
filename = "functions"
# If explicitly defined
env_dir = os.getenv(ENVVAR)
if env_dir and os.path.isdir(env_dir):
return os.path.join(env_dir, filename)
# If installed as a library
library_dir = os.path.join(user_data_dir(LIB_NAME), filename)
if os.path.isdir(library_dir) or not os.path.exists(library_dir):
return library_dir
# If installed in a project that contains a git repo - place it in the same folder as the git repo
current_dir = os.getcwd()
while current_dir != os.path.root:
if ".git" in os.listdir(current_dir):
return os.path.join(current_dir, filename)
current_dir = os.path.dirname(current_dir)
return os.path.join(os.getcwd(), filename)
def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]:
"""
Get the size of the dataset for a function hash
"""
log_directory = self._get_log_directory()
dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION,
"positive": POSITIVE_FILE_EXTENSION,
"negative": NEGATIVE_FILE_EXTENSION,
"patches": PATCH_FILE_EXTENSION}
log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type])
if not os.path.exists(log_file_path):
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
try:
with open(log_file_path, "rb") as f:
dataset = f.read()
dataset_string = repr(dataset)
dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n")
if return_type == "both":
return dataset_length, dataset
elif return_type == "dataset":
return dataset
elif return_type == "length":
return dataset_length
except Exception as e:
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
def load_existing_datasets(self) -> Dict[str, Dict[str, str]]:
log_directory = self.log_directory
dataset_lengths = {
SYMBOLIC_ALIGNMENTS: {},
POSITIVE_EMBEDDABLE_ALIGNMENTS: {},
NEGATIVE_EMBEDDABLE_ALIGNMENTS: {},
PATCHES: {},
}
try:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
# get all the files in the log directory
files = os.listdir(log_directory)
# discard all .json files
files = [x for x in files if ".json" not in x]
except Exception as e:
return dataset_lengths
for file in files:
if ALIGN_FILE_EXTENSION not in file \
and PATCH_FILE_EXTENSION not in file \
and POSITIVE_FILE_EXTENSION not in file \
and NEGATIVE_FILE_EXTENSION not in file:
continue
elif ALIGN_FILE_EXTENSION in file:
dataset_type = SYMBOLIC_ALIGNMENTS
elif POSITIVE_FILE_EXTENSION in file:
dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS
elif NEGATIVE_FILE_EXTENSION in file:
dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS
else:
dataset_type = PATCHES
func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "")
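# -1 marks a dataset whose length has not been read yet; callers load the real length lazily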
dataset_lengths[dataset_type][func_hash] = -1
return dataset_lengths
def write(self, path: str, data: str, mode: Literal["w", "a", "a+b"] = "w") -> None:
"""
Write data to a file
"""
with open(path, mode) as f:
f.write(data)
def read(self, path: str) -> str:
"""
Read data from a file
"""
with open(path, "r") as f:
return f.read()
def get_hash_from_path(self, path) -> str:
"""
Given a path with a hash, return only the hash
:param path: The path to the file
:return: The hash
"""
return path.replace(PATCH_FILE_EXTENSION, ""). \
replace(self.log_directory, ""). \
lstrip("/"). \
lstrip("\\")
<fim_middle>"""
Get an instance of the bloom filter persistence provider. Typically this will be a file system provider.
:return: A persistence provider
""" | """
Get an instance of the bloom filter persistence provider. Typically this will be a file system provider.
:return: A persistence provider
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/models/function_config.py
def update_with_finetuned_response(self, response):
"""
Update the function config with the finetuned response
Args:
response: The finetuned response
"""
if response.status == "failed":
self.current_training_run = {}
else:
self.distilled_model = response.fine_tuned_model
self.last_training_run = self.current_training_run
self.current_model_stats = {
"trained_on_datapoints": self.current_training_run[
"trained_on_datapoints"],
"running_faults": []}
self.nr_of_training_runs += 1
self.current_training_run = {}
# tanuki_py/src/tanuki/register.py
def get_class_definition(class_type):
"""Helper function to get class definition source if not a built-in type"""
if hasattr(class_type, "__origin__"): # Check if it's a generic type
origin_type = class_type.__origin__
if origin_type is Literal: # Handle Literal case
return [literal for literal in class_type.__args__]
elif hasattr(class_type, "__args__"): # Access inner types
return [get_class_definition(arg) for arg in class_type.__args__ if arg is not None]
elif inspect.isclass(class_type) and class_type.__module__ != "builtins":
return get_source(class_type)
return class_type.__name__
# tanuki_py/src/tanuki/__init__.py
def generate_from_embedding_model_manager(function_description):
choice_parsed = []
instantiated = function_description.output_type_hint(choice_parsed)
return instantiated
"""
import ast
import datetime
import io
import json
from typing import List, Tuple, Dict, Union
import logging
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMENTS, OPENAI_PROVIDER
from tanuki.models.function_type import FunctionType
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_EMBEDDING_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.finetune_job import FinetuneJob
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.utils import approximate_token_count, prepare_object_for_saving, encode_int, decode_int
import copy
from tanuki.models.function_config import FunctionConfig
from tanuki.models.api_manager import APIManager
class FunctionModeler(object):
"""
This class manages the registered function models and their datasets
comprised of symbolic and embeddable alignments, and symbolic and embeddable patches
"""
def __init__(self, data_worker: DatasetWorker,
api_provider: APIManager,
environment_id=0,
) -> None:
self.function_configs = {}
self.data_worker = data_worker
self.distillation_token_limit = 3000 # the token limit for finetuning
self.symbolic_align_buffer = {}
self.embeddable_align_buffer = {}
self._get_datasets()
self.environment_id = environment_id
self.check_finetune_blacklist = []
self.execute_finetune_blacklist = []
self.store_data_blacklist = []
self.api_provider = api_provider
self.teacher_models_override = {}
self.student_model_override = {}
self.startup_logging_checker = {}
def _get_dataset_info(self, dataset_type, func_hash, type="length"):
"""
Get the dataset size for a function hash
"""
return self.data_worker.load_dataset(dataset_type, func_hash, return_type=type)
def _configure_function_models(self, teacher_models: List[Union[str, BaseModelConfig]],
student_model: str,
func_hash: str,
task_type: str):
"""
Configure the function models
"""
if teacher_models:
self._configure_teacher_models(teacher_models, func_hash, task_type)
if student_model:
self._configure_student_model(student_model, func_hash, task_type)
if teacher_models and not student_model:
for model_config in self.teacher_models_override[func_hash]:
# ban all non-openai models from finetuning if the teacher is not openai and no student is specified, because it doesn't make sense
if model_config.provider != OPENAI_PROVIDER and func_hash not in self.check_finetune_blacklist:
self.check_finetune_blacklist.append(func_hash)
if model_config.provider != OPENAI_PROVIDER and func_hash not in self.execute_finetune_blacklist:
self.execute_finetune_blacklist.append(func_hash)
def _configure_teacher_models(self,
teacher_models: List[Union[str, BaseModelConfig]],
func_hash: str,
task_type: str):
"""
Add custom teacher models to the function config
First this is added to the teacher_models_override dict, which is used to override the teacher models
Args:
teacher_models: A list of teacher models to use for the function hash
func_hash: The function hash to add the teacher models to
"""
if func_hash not in self.teacher_models_override:
self.teacher_models_override[func_hash] = []
if task_type == FunctionType.EMBEDDABLE:
preconfigured_models = DEFAULT_EMBEDDING_MODELS
elif task_type == FunctionType.SYMBOLIC:
preconfigured_models = DEFAULT_TEACHER_MODELS
for model in teacher_models:
if isinstance(model, str):
if model not in preconfigured_models:
raise Exception(f"Teacher model {model} not supported by default. Please include it in the list in extended config format")
model_config = preconfigured_models[model]
elif isinstance(model, BaseModelConfig):
model_config = model
self.teacher_models_override[func_hash].append(model_config)
def _configure_student_model(self,
student_model: str,
func_hash: str,
task_type: str):
"""
Add a custom student model to the function config
First this is added to the student_model_override dict, which is used to override the student model
Args:
student_model: The student model to use for the function hash
func_hash: The function hash to add the student model to
task_type: The task type of the function
"""
if task_type == FunctionType.EMBEDDABLE:
logging.info("Embeddable function type does not support student models")
preconfigured_models = DEFAULT_STUDENT_MODELS
if student_model not in preconfigured_models:
raise Exception(f"Student model {student_model} is currently not supported.")
model_config = preconfigured_models[student_model]
self.student_model_override[func_hash] = model_config
def _get_datasets(self):
"""
Get the existing datasets from the data worker
"""
self.dataset_sizes = self.data_worker.load_existing_datasets()
def save_embeddable_align_statements(self,
function_hash: str,
args,
kwargs,
positive_pairs: List[Tuple[List, Dict]],
negative_pairs: List[Tuple[List, Dict]]):
"""
Save the contrastive align statements for the embeddable function.
Do not save if the function hash is in the store data blacklist
Args:
function_hash: A unique hash for the function
args: The arguments of the function
kwargs: The keyword arguments of the function
positive_pairs: A list of the other function invocations that should have equivalent embeddings
negative_pairs: A list of the other function invocations that should have different embeddings
"""
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
# prepare positive pairs for saving
parsed_positive_pairs = []
for pair in positive_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_positive_pairs.append(parsed_pair)
# prepare negative pairs for saving
parsed_negative_pairs = []
for pair in negative_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_negative_pairs.append(parsed_pair)
# save the contrastive pairs
for pair in parsed_positive_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=True)
for pair in parsed_negative_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=False)
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
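# blacklisted functions are not persisted to disk, but the datapoint is still treated as new
# so that it is added to the in-memory align buffer below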
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
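# each example is appended as one stringified dict per line and is split back out on
# newlines in _get_examples_from_alignment_buffer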
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_datapoint(self, func_hash, example):
"""
Save datapoint to the training data
"""
written_datapoints = self.data_worker.log_symbolic_patch(func_hash, example)
for func_hash, datapoints in written_datapoints.items():
if func_hash in self.dataset_sizes[PATCHES]:
# if the dataset size is -1, it means we haven't read in the dataset size yet
if self.dataset_sizes[PATCHES][func_hash] == -1:
self.dataset_sizes[PATCHES][func_hash] = self._get_dataset_info(PATCHES, func_hash, type="length")
else:
self.dataset_sizes[PATCHES][func_hash] += datapoints
else:
self.dataset_sizes[PATCHES][func_hash] = datapoints
return len(written_datapoints) > 0
def get_symbolic_alignments(self, func_hash, max=20):
"""
Get all symbolic aligns for a function hash
"""
if func_hash not in self.symbolic_align_buffer:
return []
buffer = self.symbolic_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def get_embeddable_alignments(self, func_hash, max=20):
"""
Get all embeddable aligns for a function hash
"""
if func_hash not in self.embeddable_align_buffer:
return []
buffer = self.embeddable_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def _get_examples_from_alignment_buffer(self, buffer, max=20):
"""
Get examples from a buffer
"""
split_buffer = bytes(buffer).split(b"\n")
# byte array of stringed python dicts into dict objects
example_set = set()
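# the set is only used for de-duplication; the second pass below walks the buffer in its
# original order and removes each example once it has been used, so ordering is preserved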
for example in split_buffer:
if example == b"":
continue
example_set.add(example)
# easy and straightforward way to get the number of tokens (not perfect, but it doesn't need to be)
# proper tokenization can be added later; it would be slower and we don't need 100% accuracy
example_element_limit = EXAMPLE_ELEMENT_LIMIT
examples = []
for example_bytes in split_buffer:
if example_bytes in example_set:
nr_of_elements = approximate_token_count(example_bytes)
example_element_limit -= nr_of_elements
if example_element_limit < 0:
break
example = example_bytes.decode('utf-8')
# json load the example
try:
example = json.loads(example)
except:
example = ast.literal_eval(example)
examples.append(example)
example_set.remove(example_bytes)
return list(examples)[:max]
def load_symbolic_align_statements(self, function_hash):
"""
Load all align statements
First check the data storage blacklist,
if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray
"""
if function_hash in self.store_data_blacklist:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0
self.symbolic_align_buffer[function_hash] = bytearray()
elif function_hash not in self.symbolic_align_buffer:
dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both")
if align_dataset:
self.symbolic_align_buffer[function_hash] = bytearray(align_dataset)
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size
def postprocess_symbolic_datapoint(self, func_hash, function_description, example, repaired=True):
"""
Postprocess the datapoint
First check if the datapoint should be added to the training data
Add the datapoint if it should be added
Then check if the function should be finetuned and execute finetuning if it should
"""
try:
if func_hash not in self.store_data_blacklist:
added = self.save_symbolic_datapoint(func_hash, example)
if added:
self._update_datapoint_config(repaired, func_hash)
except Exception as e:
print(e)
print("Could not add datapoint to training data")
if func_hash not in self.execute_finetune_blacklist:
self.check_for_finetuning(function_description, func_hash)
def load_function_config(self, func_hash, function_description):
<fim_suffix>
config, default = self.data_worker.load_function_config(func_hash)
if func_hash in self.student_model_override and config.distilled_model.model_name == "":
config.distilled_model = self.student_model_override[func_hash]
if default and func_hash not in self.check_finetune_blacklist:
finetuned, finetune_config = self._check_for_finetunes(function_description, config.distilled_model)
if finetuned:
config = finetune_config
# update teachers if not default
if func_hash in self.teacher_models_override:
config.teacher_models = self.teacher_models_override[func_hash]
self.function_configs[func_hash] = config
return config
def _check_for_finetunes(self, function_description: FunctionDescription, model_config : BaseModelConfig) -> Tuple[bool, Dict]:
# hash the function_hash into 16 characters (to embed it into the name of OpenAI finetunes, for later retrieval)
logging.info(f"Checking for finetunes for {function_description.name} using {model_config.provider}")
finetune_hash = function_description.__hash__(purpose="finetune") + encode_int(self.environment_id)
# List the fine-tuning jobs (up to 1000)
finetunes: List[FinetuneJob] = self.api_provider[model_config.provider].list_finetuned(model_config, limit=1000)
# Check if the function_hash is in the fine-tuning jobs
# the finetunes are in reverse chronological order (newest first)
# so this gets the latest finetune
for finetune in finetunes:
# check if the finetune hash is in the fine-tuned model name
if finetune.status == "succeeded" and finetune_hash in finetune.fine_tuned_model.model_name:
try:
config = self._construct_config_from_finetune(finetune_hash, finetune)
# save the config
self.data_worker.update_function_config(function_description.__hash__(), config)
logging.info(f"Found finetuned model for {function_description.name} [{config.distilled_model.model_name}]")
return True, config
except:
logging.info(f"Found finetuned model for {function_description.name} [{finetune.fine_tuned_model.model_name}] but could not load it")
return False, {}
logging.info(f"No finetuned model found for {function_description.name}")
return False, {}
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
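# reconstruct the number of trained-on datapoints from the doubling schedule used for
# finetuning thresholds (200, 400, 800, ... datapoints per run)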
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
def get_models(self, function_description):
"""
Return the current model from the config file
"""
func_hash = function_description.__hash__()
if func_hash in self.function_configs:
func_config = self.function_configs[func_hash]
else:
func_config = self.load_function_config(func_hash, function_description)
return func_config.distilled_model, func_config.teacher_models
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
Then updates the running faults depending on whether the datapoint was repaired, keeping the last 100
Then checks the revert condition, i.e. if more than 50% of the last 10 datapoints are faulty
Finally updates the config file
Args:
repaired (bool): whether the datapoint was repaired (fixed by the teacher model) before being added to the training data
"""
try:
if repaired:
self.function_configs[func_hash].current_model_stats["running_faults"].append(1)
else:
self.function_configs[func_hash].current_model_stats["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash].current_model_stats["running_faults"] = \
self.function_configs[func_hash].current_model_stats["running_faults"][-100:]
# check if more than 50% of the last 10 datapoints are faulty; this is the switch condition
if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash].distilled_model.model_name = ""
self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0
self.function_configs[func_hash].current_model_stats["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
def _update_config_file(self, func_hash):
self.data_worker.update_function_config(func_hash, self.function_configs[func_hash])
def check_for_finetuning(self, function_description, func_hash):
"""
Check for finetuning status
If already finetuning, check for finetuning status
If not finetuning, check for finetuning condition and execute finetuning if condition is met
"""
try:
# check if already finetuning
if "job_id" in self.function_configs[func_hash].current_training_run:
# check for job status
self._check_finetuning_status(func_hash, function_description)
else:
# check for finetuning condition
if self._check_finetuning_condition(func_hash, function_description):
self._execute_finetuning(function_description, func_hash)
except Exception as e:
print(e)
print("Error checking for finetuning")
def _check_finetuning_condition(self, func_hash, function_description):
"""
Check if the finetuning condition is met
Currently finetuning condition is dependent on the number of symbolic datapoints since last finetuning
"""
if func_hash not in self.function_configs:
return False
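# the finetuning threshold doubles with every completed training run:
# 200 datapoints before the first run, then 400, 800, ...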
training_threshold = (2 ** self.function_configs[func_hash].nr_of_training_runs) * 200
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
if patch_dataset_size == -1:
# if we haven't read in the patch dataset size yet, read it in
patch_dataset_size = self._get_dataset_info(PATCHES, func_hash, type="length")
self.dataset_sizes[PATCHES][func_hash] = patch_dataset_size
if func_hash not in self.startup_logging_checker:
logging.info(f"Function {function_description.name} [{align_dataset_size} aligns | {patch_dataset_size} runs] will be finetuned from"\
f" {self.function_configs[func_hash].teacher_models[0].model_name} using {self.function_configs[func_hash].distilled_model.provider} in "\
f"{training_threshold-(patch_dataset_size + align_dataset_size)} runs")
self.startup_logging_checker[func_hash] = True
return (patch_dataset_size + align_dataset_size) > training_threshold
def _execute_finetuning(self, function_description, func_hash):
"""
Execute the finetuning
First create the OpenAI compatible dataset with jsonL file and upload it
Then submit the OpenAI finetuning job
Finally update the config file to reflect the new finetuning job as current
"""
# get function description
function_string = str(function_description.__dict__.__repr__() + "\n")
# get the align dataset
align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, func_hash, type="dataset")
if not align_dataset:
align_dataset = ""
else:
align_dataset = align_dataset.decode('utf-8')
# get the patch dataset
patch_dataset = self._get_dataset_info(PATCHES, func_hash, type="dataset")
if not patch_dataset:
patch_dataset = ""
else:
patch_dataset = patch_dataset.decode('utf-8')
if align_dataset == "" and patch_dataset == "":
return
dataset = align_dataset + patch_dataset
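# literal "\n" escape sequences inside a datapoint are temporarily swapped for a placeholder
# token so that splitting on real newlines below only separates datapoints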
dataset = dataset.replace("\\n", "[SEP_TOKEN]")
dataset = dataset.split("\n")
dataset = [x.replace("[SEP_TOKEN]", "\\n") for x in dataset if x != ""]
# read in the dataset file
dataset = [ast.literal_eval(x) for x in dataset]
#
# create the openai dataset
instruction = "You are given below a function description and input data. The function description of what the function must carry out can be found in the Function section, with input and output type hints. The input data can be found in Input section. Using the function description, apply the function to the Input and return a valid output type, that is acceptable by the output_class_definition and output_class_hint. Return None if you can't apply the function to the input or if the output is optional and the correct output is None.\nINCREDIBLY IMPORTANT: Only output a JSON-compatible string in the correct response format."
finetuning_dataset = [{"messages": [
{
"role": "system",
"content": f"You are a skillful and accurate language model, who applies a described function on input data. Make sure the function is applied accurately and correctly and the outputs follow the output type hints and are valid outputs given the output types."
},
{"role": "user",
"content": f"{instruction}\nFunction: {function_string}---\nInputs:\nArgs: {x['args']}\nKwargs: {x['kwargs']}\nOutput:"},
{"role": "assistant", "content": str(x['output']) if x['output'] is not None else "None"}]}
for x in dataset]
# Create an in-memory text stream
temp_file = io.BytesIO()
# Write data to the stream
for idx, item in enumerate(finetuning_dataset):
temp_file.write(json.dumps(item).encode('utf-8'))
if idx != len(finetuning_dataset) - 1:
temp_file.write("\n".encode('utf-8'))
# Reset the stream position to the beginning
temp_file.seek(0)
# create the finetune hash
finetune_hash = function_description.__hash__(purpose="finetune")
nr_of_training_runs = self.function_configs[func_hash].nr_of_training_runs
finetune_hash += encode_int(self.environment_id)
finetune_hash += encode_int(nr_of_training_runs)
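# the hash is passed as the finetune suffix so it ends up in the finetuned model name,
# which is how _check_for_finetunes recovers the model later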
# here we can be sure that the datasets were read in, as that is checked by the finetuning condition
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
total_dataset_size = align_dataset_size + patch_dataset_size
# Use the stream as a file
try:
finetune_provider = self.function_configs[func_hash].distilled_model.provider
logging.info(f"Starting finetuning for {function_description.name} using {finetune_provider} for {self.function_configs[func_hash].distilled_model.base_model_for_sft}")
finetuning_response: FinetuneJob = self.api_provider[finetune_provider].finetune(file=temp_file,
suffix=finetune_hash,
model_config = self.function_configs[func_hash].distilled_model,)
except Exception as e:
logging.info(f"Could not start finetuning for {function_description.name} using {finetune_provider}. Error: {e}")
return
self.function_configs[func_hash].current_training_run = {"job_id": finetuning_response.id,
"trained_on_datapoints": total_dataset_size,
"last_checked": datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")}
# update the config json file
try:
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file to register a finetuning run")
def _check_finetuning_status(self, func_hash, function_description):
"""
Check the status of the current finetuning job
If the job is finished, update the config file to reflect the new model
"""
job_id = self.function_configs[func_hash].current_training_run["job_id"]
last_checked = self.function_configs[func_hash].current_training_run["last_checked"]
# check if last checked was more than 30 mins ago
if (datetime.datetime.now() - datetime.datetime.strptime(last_checked,
"%Y-%m-%d %H:%M:%S")).total_seconds() > 1800:
finetune_provider = self.function_configs[func_hash].distilled_model.provider
response = self.api_provider[finetune_provider].get_finetuned(job_id, model_config = self.function_configs[func_hash].distilled_model)
self.function_configs[func_hash].current_training_run["last_checked"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
if response.status == "succeeded" or response.status == "failed":
self._update_finetune_config(response, func_hash, function_description)
else:
self._update_config_file(func_hash)
def _update_finetune_config(self, response: FinetuneJob, func_hash, function_description):
"""
Update the config file to reflect the new model and switch the current model to the finetuned model
"""
self.function_configs[func_hash].update_with_finetuned_response(response)
logging.info(f"Finetuning for {function_description.name} using {self.function_configs[func_hash].distilled_model.provider} finished with status: {response.status}."\
f" The id of the finetuned model is {response.fine_tuned_model.model_name}")
try:
self._update_config_file(func_hash)
except Exception as e:
logging.info(f"Could not update the function configuration file with the finetuned model for {function_description.name}. Error: {e}")
pass
<fim_middle>"""
Load the config file for a function hash
""" | """
Load the config file for a function hash
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):  # could also allow MutableMapping, OrderedDict
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
<fim_suffix>
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple) | if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/register.py
def get_class_definition(class_type):
"""Helper function to get class definition source if not a built-in type"""
if hasattr(class_type, "__origin__"): # Check if it's a generic type
origin_type = class_type.__origin__
if origin_type is Literal: # Handle Literal case
return [literal for literal in class_type.__args__]
elif hasattr(class_type, "__args__"): # Access inner types
return [get_class_definition(arg) for arg in class_type.__args__ if arg is not None]
elif inspect.isclass(class_type) and class_type.__module__ != "builtins":
return get_source(class_type)
return class_type.__name__
# tanuki_py/src/tanuki/__init__.py
def extract_attributes(result):
attributes = {}
# If the result is a list, get its length
if isinstance(result, list):
attributes['length'] = len(result)
# If the result is a dictionary, get its keys (or any other attributes)
elif isinstance(result, dict):
attributes['keys'] = list(result.keys())
return attributes
# tanuki_py/src/tanuki/function_modeler.py
def load_symbolic_align_statements(self, function_hash):
"""
Load all align statements
First check the data storage blacklist,
if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray
"""
if function_hash in self.store_data_blacklist:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0
self.symbolic_align_buffer[function_hash] = bytearray()
elif function_hash not in self.symbolic_align_buffer:
dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both")
if align_dataset:
self.symbolic_align_buffer[function_hash] = bytearray(align_dataset)
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):  # could also allow MutableMapping, OrderedDict
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
<fim_suffix>
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base) | if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
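A minimal usage sketch of the Validator shown in these rows, assuming it is importable as tanuki.validator.Validator and that pydantic is installed; the sample model and the expected results in the comments are illustrative only.

from typing import List, Optional, Union

from pydantic import BaseModel

from tanuki.validator import Validator  # assumed import path


class Person(BaseModel):
    name: str
    age: int
    nickname: Optional[str] = None  # Optional fields are not treated as required


validator = Validator()

# validate_output parses the JSON string first, then defers to check_type
print(validator.validate_output('[1, 2, 3]', List[int]))               # True
print(validator.validate_output('["a", 2]', List[int]))                # False
print(validator.validate_output('{"name": "Bo", "age": 3}', Person))   # True, nickname may be omitted

# check_type works directly on already-deserialized values
print(validator.check_type({"name": "Bo", "age": "3"}, Person))        # False, age must be an int
print(validator.check_type(7, Union[int, str]))                        # True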
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
<fim_suffix>
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones withouyt default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are of the correct type
# this is an additional check, because the loop above only validates required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if not isinstance(value, tuple):
return False | if not isinstance(value, tuple):
return False | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
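A short, hedged sketch of how instantiate is meant to turn JSON-style data back into typed Python objects; the import path and the LineItem dataclass are assumptions made for illustration.

from dataclasses import dataclass
from typing import Dict, List

from tanuki.validator import Validator  # assumed import path


@dataclass
class LineItem:
    sku: str
    quantity: int


validator = Validator()

# Base types: a float-looking string targeted at int goes through the int(float(...)) fallback
print(validator.instantiate("2.9", int))                    # 2

# Dataclasses are rebuilt field by field, coercing each value to its annotated type
item = validator.instantiate({"sku": "A1", "quantity": "3"}, LineItem)
print(item)                                                 # LineItem(sku='A1', quantity=3)

# Dict values are instantiated recursively using the subscripted key/value types
by_sku = validator.instantiate({"A1": {"sku": "A1", "quantity": 1}}, Dict[str, LineItem])
print(by_sku["A1"].quantity)                                # 1

# Generic lists of base types round-trip as plain lists
print(validator.instantiate(["1", "2"], List[int]))         # [1, 2]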
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones withouyt default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are of the correct type
# this is an additional check, because the loop above only validates required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
<fim_suffix>
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items) | if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
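A hedged sketch of the __orig_bases__ handling in _find_generic_base_and_args and the list-subclass branch of instantiate; the IntList class and the import path are hypothetical, introduced only to illustrate the behaviour.

from typing import List, get_args

from tanuki.validator import Validator  # assumed import path


class IntList(List[int]):
    """Hypothetical list subclass used only for illustration."""


validator = Validator()

# The bare subclass carries no direct type arguments...
print(get_args(IntList))                                    # ()

# ...but _find_generic_base_and_args walks __orig_bases__ and recovers List[int]
base, args = validator._find_generic_base_and_args(IntList)
print(base, args)                                           # typing.List[int] (<class 'int'>,)

# instantiate therefore coerces each element to int and returns an IntList, not a plain list
result = validator.instantiate(["1", 2], IntList)
print(type(result).__name__, result)                        # IntList [1, 2]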
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/function_modeler.py
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
"""
import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
def log_symbolic_patch(self, func_hash, example):
"""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
"""
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
try:
self.ensure_persistence_location_exists()
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
<fim_suffix>
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB
written_datapoints = {}
try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter()
except Exception as e:
pass
return written_datapoints
return {}
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the message and log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle>if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray() | if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray() | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
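The row above completes log_symbolic_patch, whose core pattern is an in-memory per-file buffer that is flushed once it outgrows a doubling threshold (capped at 4 KB by the min() check). A minimal standalone sketch of that buffering idea, using hypothetical class and file names rather than the tanuki API:

import os
import tempfile
from typing import Dict

class BufferedAppender:
    """Toy sketch of the doubling flush-threshold pattern shown above (not the tanuki API)."""

    def __init__(self, flush_floor: int = 1, flush_cap: int = 4096):
        self.buffers: Dict[str, bytearray] = {}
        self.flush_limit: Dict[str, int] = {}
        self.flush_floor = flush_floor
        self.flush_cap = flush_cap

    def log(self, path: str, record: bytes) -> None:
        buf = self.buffers.setdefault(path, bytearray())
        self.flush_limit.setdefault(path, self.flush_floor)
        buf.extend(record)
        # Flush once the buffer reaches the current threshold (capped at flush_cap),
        # then double the threshold so disk writes become progressively rarer.
        if len(buf) >= min(self.flush_limit[path], self.flush_cap):
            with open(path, "ab") as f:
                f.write(buf)
            buf.clear()
            self.flush_limit[path] *= 2

    def flush_all(self) -> None:
        # Counterpart of flush(): write out whatever is still buffered for every path.
        for path, buf in self.buffers.items():
            if buf:
                with open(path, "ab") as f:
                    f.write(buf)
                buf.clear()

if __name__ == "__main__":
    target = os.path.join(tempfile.gettempdir(), "demo.patches")
    appender = BufferedAppender()
    for i in range(10):
        appender.log(target, f'{{"example": {i}}}\n'.encode("utf-8"))
    appender.flush_all()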
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/utils.py
def get_model(content, logger, func_hash):
"""
Get the model from the content and the logger.
Decide on model depending on the length of the content. if is finetunable, return model, true, otherwise return model, false
Args:
content (str): the content to be aligned
logger (buffered logger): the logger
func_hash (str): the function hash
Returns:
model (str): the model to be used
finetunable (bool): whether the model is finetunable
"""
num_tokens = approximate_token_count(content)
finetune_limit = logger.finetune_token_limit
finetune_model, teacher_models = logger.get_models(func_hash)
if num_tokens < finetune_limit:
return finetune_model, True
else:
# this is just for backwards compatibility currently
if len(teacher_models) == 0 or isinstance(teacher_models[0], str):
teacher_models = [("gpt-4", 7000),("gpt-4-32k", 31000)]
for model, token_limit in teacher_models:
if num_tokens < token_limit:
return model, False
raise ValueError("The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
# tanuki_py/src/tanuki/language_models/embedding_model_manager.py
def get_embedding_case(self, args, function_description: FunctionDescription, kwargs, examples=None):
# example_input = f"Examples:{examples}\n" if examples else ""
content = f"Name: {function_description.name}\nArgs: {args}\nKwargs: {kwargs}"
function_hash = function_description.__hash__()
if function_hash in self.function_modeler.teacher_models_override: # check for overrides
model = self.function_modeler.teacher_models_override[function_hash][0] # take currently the first model
else:
model = DEFAULT_EMBEDDING_MODELS[DEFAULT_EMBEDDING_MODEL_NAME]
# loggings
if function_hash not in self.initialized_functions:
logging.info(f"Generating function embeddings for {function_description.name} with {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
elif self.initialized_functions[function_hash] != model.model_name:
logging.info(f"Switching embeddings generation for {function_description.name} from {self.initialized_functions[function_hash]} to {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
return content, model
# tanuki_py/src/tanuki/function_modeler.py
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
Then updates running faults depending if priority is True or not and takes last 100
Then checks the revert condition, i.e if last 10 datapoints are 50% faulty
Finally updates the config file
Args:
priority (bool): whether the datapoint was fixed by the teacher model/should be added to the training data
"""
try:
if repaired:
self.function_configs[func_hash].current_model_stats["running_faults"].append(1)
else:
self.function_configs[func_hash].current_model_stats["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash].current_model_stats["running_faults"] = \
self.function_configs[func_hash].current_model_stats["running_faults"][-100:]
# check if the last 10 datapoints are 50% faulty, this is the switch condition
if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash].distilled_model.model_name = ""
self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0
self.function_configs[func_hash].current_model_stats["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
"""
import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.language_model_output import LanguageModelOutput
from tanuki.utils import approximate_token_count
from tanuki.validator import Validator
from tanuki.models.api_manager import APIManager
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
import logging
class LanguageModelManager(object):
"""
The LanguageModelManager is responsible for managing the language models and their outputs operationally,
this includes:
- Generating outputs from the language models
- Repairing outputs from the language models
- Saving outputs from the language models
- Finetuning the language models from the saved outputs
"""
def __init__(self,
function_modeler: FunctionModeler,
api_provider: APIManager,
generation_token_limit=512,) -> None:
self.api_provider = api_provider
self.function_modeler = function_modeler
self.default_generation_length = generation_token_limit
self.initialized_functions = {}
self.token_counts = {}
def __call__(self,
args,
function_description: FunctionDescription,
kwargs,
validator: Validator,
generation_parameters: dict) -> Any:
# add the generation length if not there
if "max_new_tokens" not in generation_parameters:
generation_parameters["max_new_tokens"] = self.default_generation_length
output = self.generate(args, kwargs, function_description, generation_parameters)
# start parsing the object, very hacky way for the time being
choice_parsed = self._parse_choice(output)
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
choice, choice_parsed, successful_repair = self.repair_output(args,
kwargs,
function_description,
output.generated_response,
validator,
generation_parameters)
if not successful_repair:
raise TypeError(
f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
output.generated_response = choice
output.distilled_model = False
datapoint = FunctionExample(args, kwargs, output.generated_response)
if output.suitable_for_finetuning and not output.distilled_model:
self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
datapoint, repaired=not valid)
instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
return instantiated
def _parse_choice(self, output):
try:
# json load
choice_parsed = json.loads(output.generated_response)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(output.generated_response)
except:
choice_parsed = output.generated_response
return choice_parsed
def generate(self, args, kwargs, function_description, llm_parameters={}):
"""
The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset
"""
func_hash = function_description.__hash__()
prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs,
function_description,
llm_parameters,
func_hash)
# loggings
current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements
if current_function_setup:
generator_model = current_function_setup["model"]
if is_distilled_model:
logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model == "":
logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. Generating function outputs with {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model != model.model_name:
logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
choice = self._synthesise_answer(prompt, model, llm_parameters)
output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model)
return output
def _synthesise_answer(self, prompt, model, llm_parameters):
"""
Synthesise an answer given the prompt, model, model_type and llm_parameters
Args:
prompt (str): The prompt to send to the model
model (BaseModelConfig): The model to use for generation
llm_parameters (dict): The parameters to use for generation
return:
choice (str): The generated response
"""
system_message = model.system_message
return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters)
def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash):
"""
Get the generation case with the correct prompt and model
First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune
If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count
"""
f = str(function_description.__dict__.__repr__())
distilled_model, teacher_models = self.function_modeler.get_models(function_description)
is_distilled_model = distilled_model.model_name != ""
suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f,
distilled_model)
<fim_suffix>
# no examples needed, using a finetuned model. Don't save to the finetune dataset
if is_distilled_model and suitable_for_distillation:
prompt = self.construct_prompt(f, args, kwargs, [], distilled_model)
return prompt, distilled_model, suitable_for_distillation, True
else:
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16)
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
# update the examples in the initialized_functions dict
self.initialized_functions[func_hash]["examples"] = examples
examples_token_count = sum([approximate_token_count(example) for example in examples])
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(teacher_models,
examples_token_count + input_prompt_token_count + generation_tokens,
len(examples))
if model:
examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in
aligns]
prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model)
return prompt, model, suitable_for_distillation, False
else:
raise ValueError(
"The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig):
"""
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
"""
# check if finetunable
finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
input_prompt_token_count = approximate_token_count(finetuning_prompt)
if distilled_model.system_message_token_count < 0:
distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message)
if distilled_model.instruction_token_count < 0:
distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions)
suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length
return suitable_for_finetune, input_prompt_token_count
def construct_prompt(self, f, args, kwargs, examples, model):
"""
Construct a prompt given the model, function description, args, kwargs and examples
Args:
model (BaseModelConfig): The model to use for generation
f (str): The function description
args (tuple): The args of the function
kwargs (tuple): The kwargs of the function
examples (list): The examples of the function
Returns:
content (str): The prompt to send to the model
"""
if examples:
final_examples = "\n".join(
[f"{align}" for align in
examples])
example_input = f"Examples:{final_examples}\n"
else:
example_input = ""
instruction_prompt = model.instructions
content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
return content
def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters):
"""
Repair the output given the input, function description, failed outputs list, examples and models
"""
# get the token counts
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
examples_token_count = sum([approximate_token_count(example) for example in examples])
failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list])
input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:")
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(models,
examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count,
len(examples))
if model:
prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model)
logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}")
choice = self._synthesise_answer(prompt, model, llm_parameters)
return choice
else:
return None
def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model):
"""
Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples
"""
if examples:
final_examples = "\n".join(
[f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in
examples])
successful_examples = f"Examples:{final_examples}\n"
else:
successful_examples = ""
failed_examples = ""
for failed_output in failed_outputs_list:
failed_examples += f"Output: {failed_output[0]}\nError: {failed_output[1]}\n\n"
end_token_addition = ""
if model.parsing_helper_tokens["end_token"]:
end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output."
prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:"
return prompt
def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0):
"""
Choose a model from the models given the token count and number of examples
Args:
models (list): The models to choose from
input_token_count (int): The token count of the input
nr_of_examples (int): The number of examples
Returns:
model (BaseModelConfig): The chosen model
"""
for model in models:
# check if input token count is less than the context length
# If the model config has custom messages, then use those, otherwise use the default ones
if model.system_message_token_count < 0:
model.system_message_token_count = approximate_token_count(model.system_message)
if model.instruction_token_count < 0:
model.instruction_token_count = approximate_token_count(model.instructions)
if model.parsing_helper_tokens["start_token"]:
input_token_count += 2*nr_of_examples
if model.parsing_helper_tokens["end_token"]:
input_token_count += 2*nr_of_examples
total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count
if total_token_count < model.context_length:
return model
return None
def repair_output(self,
args: tuple,
kwargs: dict,
function_description: FunctionDescription,
choice,
validator: Validator,
generation_parameters: dict) -> tuple:
"""
Repair an output, that failed type validation by generating a new output using the teacher model and the error
Args:
args (tuple): The args of the function
kwargs (dict): The kwargs of the function
function_description (FunctionDescription): The function description
choice: The output that failed type validation, type is arbitrary
validator (Validator): The validator object
Returns:
choice (str): The choice that was generated by the language model
choice_parsed: The parsed choice, type is arbitrary
valid (bool): Whether the repaired output was valid
"""
# get the teacher models
teacher_models = self.function_modeler.get_models(function_description)[1]
valid = False
retry_index = 5
f = str(function_description.__dict__.__repr__() + "\n")
error = f"Output type was not valid. Expected an valid object of type {function_description.output_type_hint}, got '{choice}'"
# instantiate the failed outputs list
failed_outputs_list = [(choice, error)]
while retry_index > 0 and not valid:
# get the alignments
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5)
# Generate the repaired LLM output
choice = self.repair_generate(args,
kwargs,
f,
failed_outputs_list,
aligns,
teacher_models,
generation_parameters)
if not choice:
# if no choice then the input was too long for the model
# no specific error but the retry index goes down
retry_index -= 1
continue
# start parsing the object
try:
# json load
choice_parsed = json.loads(choice)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(choice)
except:
choice_parsed = choice
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
# if it's not valid, add it to the failed outputs list
error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'"
failed_outputs_list.append((choice, error))
retry_index -= 1
if valid:
logging.info(f"Successfully repaired output.")
return choice, choice_parsed, valid
<fim_middle>if func_hash not in self.initialized_functions:
# initialise the initialized_functions dict
self.initialized_functions[func_hash] = {"model": "", "examples": []} | if func_hash not in self.initialized_functions:
# initialise the initialized_functions dict
self.initialized_functions[func_hash] = {"model": "", "examples": []} | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
<fim_suffix>
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin)and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return int(float(data))
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.") | if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return int(float(data))
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.") | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
<fim_suffix>
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict) | if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
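A minimal usage sketch of the dictionary-like handling completed in the row above, assuming the Validator class shown in these rows is importable as `from tanuki.validator import Validator` (a hypothetical import path inferred from the file name) and behaves as written:
from collections import defaultdict
from typing import DefaultDict, Dict
from tanuki.validator import Validator  # assumed import path, not confirmed by the row
validator = Validator()
raw = {"a": "1", "b": "2"}
# Plain dict target: keys and values are instantiated to the annotated types.
as_dict = validator.instantiate(raw, Dict[str, int])
assert as_dict == {"a": 1, "b": 2}
# defaultdict target: the branch above falls back to an int default factory.
as_default: DefaultDict[str, int] = validator.instantiate(raw, DefaultDict[str, int])
assert isinstance(as_default, defaultdict) and as_default["missing"] == 0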
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
<fim_suffix>
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value) | if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
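A minimal sketch of check_type against the list, tuple, and Union branches shown above, again assuming the Validator class behaves as written and the import path is the hypothetical one inferred from the file name:
from typing import Dict, List, Optional, Tuple
from tanuki.validator import Validator  # assumed import path, not confirmed by the row
validator = Validator()
assert validator.check_type([1, 2, 3], List[int])
assert not validator.check_type([1, "2"], List[int])   # mixed element types fail
assert validator.check_type({"a": 1}, Dict[str, int])
assert validator.check_type(None, Optional[int])        # Optional is Union[int, None]
# Note: the tuple branch checks every element against the first type argument only.
assert validator.check_type((1, 2), Tuple[int, str])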
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
<fim_suffix>
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value) | if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
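A sketch of instantiate and validate_output with a dataclass target; the Point class is hypothetical, and the import path is assumed from the file name rather than confirmed by the row:
import dataclasses
from typing import List
from tanuki.validator import Validator  # assumed import path, not confirmed by the row
@dataclasses.dataclass
class Point:
    x: int
    y: int
validator = Validator()
# Dict data is filtered to the dataclass fields and coerced to the annotated types.
point = validator.instantiate({"x": "1", "y": 2, "extra": "ignored"}, Point)
assert point == Point(x=1, y=2)
# validate_output parses a JSON string and then runs check_type on the result.
assert validator.validate_output('[1, 2, 3]', List[int])
assert not validator.validate_output('not json', List[int])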
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
<fim_suffix>
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>return target_type(data) | return target_type(data) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
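The Validator above exposes two entry points: validate_output type-checks a raw JSON string against a type hint, and instantiate rebuilds typed Python objects from the parsed data. A minimal usage sketch, assuming the Validator class shown above is importable; the Point model and the payload are hypothetical:

import json
from typing import List
from pydantic import BaseModel

class Point(BaseModel):  # hypothetical model used only for illustration
    x: int
    y: int

validator = Validator()  # assumes the Validator class defined above is in scope
payload = '[{"x": 1, "y": 2}, {"x": 3, "y": 4}]'
if validator.validate_output(payload, List[Point]):
    # the type check passed, so rebuild typed objects from the parsed JSON
    points = validator.instantiate(json.loads(payload), List[Point])
    # points == [Point(x=1, y=2), Point(x=3, y=4)]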
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]:
"""
Get the size of the dataset for a function hash
"""
log_directory = self._get_log_directory()
dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION,
"positive": POSITIVE_FILE_EXTENSION,
"negative": NEGATIVE_FILE_EXTENSION,
"patches": PATCH_FILE_EXTENSION}
log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type])
if not os.path.exists(log_file_path):
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
try:
with open(log_file_path, "rb") as f:
dataset = f.read()
dataset_string = repr(dataset)
dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n")
if return_type == "both":
return dataset_length, dataset
elif return_type == "dataset":
return dataset
elif return_type == "length":
return dataset_length
except Exception as e:
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def read(self, path: str) -> str:
"""
Read data from a file
"""
with open(path, "r") as f:
return f.read()
"""
import os
from bitarray._bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilterFileSystemDriver(IBloomFilterPersistence):
"""
This is a Filesystem implementation of a Bloom Filter persistence layer.
"""
def __init__(self, log_directory: str):
self.log_directory = log_directory
def save(self, bit_array: bitarray) -> None:
"""
Write a bloom filter array of bits to the local filesystem.
:param bit_array: The bloom filter's array of bits, which tracks unique function invocations
"""
bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin')
# Append 0 bits to make the length a multiple of 8
while len(bit_array) % 8 != 0:
bit_array.append(0)
with open(bloom_filter_path, 'wb') as f:
f.write(bit_array.tobytes())
def load(self) -> bitarray:
"""
Load a bloom filter from the local filesystem.
:return: A bloom filter object containing the state of unique function invocations
"""
<fim_suffix>
with open(bloom_filter_path, 'rb') as f:
bit_array = bitarray()
bit_array.frombytes(f.read())
while len(bit_array) % 8 != 0:
bit_array.append(0)
return bit_array<fim_middle>bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin') | bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin') | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
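The save and load methods above simply round-trip the filter's bit array through bloom_filter_state.bin, padding with zero bits to a whole number of bytes before writing. A minimal round-trip sketch, assuming the bitarray package is installed and BloomFilterFileSystemDriver is importable as above; the temporary directory is illustrative:

import tempfile
from bitarray import bitarray

log_dir = tempfile.mkdtemp()                      # illustrative scratch directory
driver = BloomFilterFileSystemDriver(log_directory=log_dir)

bits = bitarray('10110')                          # 5 bits; save() pads to 8 before writing
driver.save(bits)
restored = driver.load()                          # bytes read back into a bitarray
assert restored[:5] == bitarray('10110')          # the original bits survive the round trip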
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/register.py
def get_class_definition(class_type):
"""Helper function to get class definition source if not a built-in type"""
if hasattr(class_type, "__origin__"): # Check if it's a generic type
origin_type = class_type.__origin__
if origin_type is Literal: # Handle Literal case
return [literal for literal in class_type.__args__]
elif hasattr(class_type, "__args__"): # Access inner types
return [get_class_definition(arg) for arg in class_type.__args__ if arg is not None]
elif inspect.isclass(class_type) and class_type.__module__ != "builtins":
return get_source(class_type)
return class_type.__name__
# tanuki_py/src/tanuki/__init__.py
def extract_attributes(result):
attributes = {}
# If the result is a list, get its length
if isinstance(result, list):
attributes['length'] = len(result)
# If the result is a dictionary, get its keys (or any other attributes)
elif isinstance(result, dict):
attributes['keys'] = list(result.keys())
return attributes
# tanuki_py/src/tanuki/function_modeler.py
def load_symbolic_align_statements(self, function_hash):
"""
Load all align statements
First check the data storage blacklist,
if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray
"""
if function_hash in self.store_data_blacklist:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0
self.symbolic_align_buffer[function_hash] = bytearray()
elif function_hash not in self.symbolic_align_buffer:
dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both")
if align_dataset:
self.symbolic_align_buffer[function_hash] = bytearray(align_dataset)
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
<fim_suffix>
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>origin = get_origin(target_type) | origin = get_origin(target_type) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
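The completion target here is the first line of _find_generic_base_and_args, which recovers a generic's origin and type arguments either straight from a typing annotation or from a concrete subclass's __orig_bases__. A small sketch of both paths, assuming the Validator above is in scope; IntPair is a hypothetical subclass used only for illustration:

from typing import List, Tuple

validator = Validator()

# A plain annotation: origin and args come straight from typing introspection.
validator._find_generic_base_and_args(List[int])    # -> (list, (int,))

class IntPair(Tuple[int, int]):                      # hypothetical concrete subclass
    pass

# A real class: the arguments are recovered from __orig_bases__ during the MRO walk.
validator._find_generic_base_and_args(IntPair)       # -> (typing.Tuple[int, int], (int, int))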
<filename>tanuki_py/src/tanuki/models/function_description.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/bloom_filter.py
def hash_functions(self, string):
# h1(x)
hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16)
# h2(x)
hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
return hash1, hash2
# tanuki_py/src/tanuki/language_models/aws_bedrock_api.py
def check_runtime(self):
# check if the runtime is configured
if self.bedrock_runtime is None:
self.bedrock_runtime = boto3.client(
service_name='bedrock-runtime',
region_name=os.environ.get("AWS_DEFAULT_REGION")
)
# tanuki_py/src/tanuki/register.py
def get(func_name) -> Tuple[FunctionType, Callable]:
if func_name not in alignable_symbolic_functions and func_name not in alignable_embedding_functions:
pass
if func_name in alignable_symbolic_functions:
return FunctionType.SYMBOLIC, alignable_symbolic_functions[func_name]
elif func_name in alignable_embedding_functions:
return FunctionType.EMBEDDABLE, alignable_embedding_functions[func_name]
"""
import hashlib
from dataclasses import dataclass
from typing import Dict, Optional, Literal
from tanuki.models.function_type import FunctionType
from tanuki.utils import json_dumps
@dataclass(frozen=True)
class FunctionDescription:
name: str
docstring: str
input_type_hints: Dict[str, type]
input_class_definitions: Dict[str, str]
output_type_hint: type
output_class_definition: Optional[str]
type: FunctionType = FunctionType.SYMBOLIC
def __hash__(self, purpose: str = "general"):
if purpose == "general":
json_encoded = json_dumps(self).encode('utf-8')
h = hashlib.md5(json_encoded).hexdigest()
<fim_suffix>
if purpose == "finetune":
json_encoded = json_dumps(self).encode('utf-8')
h = hashlib.shake_256(json_encoded).hexdigest(8)
return str(h)<fim_middle>return str(h) | return str(h) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
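FunctionDescription.__hash__ above differs only in the digest it takes over the JSON-serialised description: md5 for the general-purpose key, and shake_256 truncated to 8 bytes so the hash is short enough to embed in a finetuned model's name. A sketch of the same two calls over a hypothetical payload standing in for json_dumps(FunctionDescription(...)):

import hashlib

# hypothetical stand-in for the serialised function description
json_encoded = '{"name": "classify_sentiment", "docstring": "..."}'.encode('utf-8')

general_hash = hashlib.md5(json_encoded).hexdigest()          # 32 hex chars, keys configs and datasets
finetune_hash = hashlib.shake_256(json_encoded).hexdigest(8)  # 8 bytes -> 16 hex chars, fits in a model name
print(general_hash, finetune_hash)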
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/language_models/language_model_manager.py
def __init__(self,
function_modeler: FunctionModeler,
api_provider: APIManager,
generation_token_limit=512,) -> None:
self.api_provider = api_provider
self.function_modeler = function_modeler
self.default_generation_length = generation_token_limit
self.initialized_functions = {}
self.token_counts = {}
# tanuki_py/src/tanuki/language_models/embedding_model_manager.py
def __init__(self, function_modeler, api_provider: APIManager):
self.function_modeler = function_modeler
self.api_provider = api_provider
self.initialized_functions = {}
# tanuki_py/src/tanuki/language_models/embedding_api_abc.py
def __init__(self) -> None:
pass
"""
import ast
import datetime
import io
import json
from typing import List, Tuple, Dict, Union
import logging
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMENTS, OPENAI_PROVIDER
from tanuki.models.function_type import FunctionType
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_EMBEDDING_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.finetune_job import FinetuneJob
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.utils import approximate_token_count, prepare_object_for_saving, encode_int, decode_int
import copy
from tanuki.models.function_config import FunctionConfig
from tanuki.models.api_manager import APIManager
class FunctionModeler(object):
"""
This class manages the registered function models and their datasets
comprised of symbolic and embeddable alignments, and symbolic and embeddable patches
"""
def __init__(self, data_worker: DatasetWorker,
api_provider: APIManager,
environment_id=0,
) -> None:
<fim_suffix>
self.data_worker = data_worker
self.distillation_token_limit = 3000 # the token limit for finetuning
self.symbolic_align_buffer = {}
self.embeddable_align_buffer = {}
self._get_datasets()
self.environment_id = environment_id
self.check_finetune_blacklist = []
self.execute_finetune_blacklist = []
self.store_data_blacklist = []
self.api_provider = api_provider
self.teacher_models_override = {}
self.student_model_override = {}
self.startup_logging_checker = {}
def _get_dataset_info(self, dataset_type, func_hash, type="length"):
"""
Get the dataset size for a function hash
"""
return self.data_worker.load_dataset(dataset_type, func_hash, return_type=type)
def _configure_function_models(self, teacher_models: List[Union[str, BaseModelConfig]],
student_model: str,
func_hash: str,
task_type: str):
"""
Configure the function models
"""
if teacher_models:
self._configure_teacher_models(teacher_models, func_hash, task_type)
if student_model:
self._configure_student_model(student_model, func_hash, task_type)
if teacher_models and not student_model:
for model_config in self.teacher_models_override[func_hash]:
# ban all non-OpenAI models from finetuning if the teacher is not OpenAI and no student is specified, because it doesn't make sense
if model_config.provider != OPENAI_PROVIDER and func_hash not in self.check_finetune_blacklist:
self.check_finetune_blacklist.append(func_hash)
if model_config.provider != OPENAI_PROVIDER and func_hash not in self.execute_finetune_blacklist:
self.execute_finetune_blacklist.append(func_hash)
def _configure_teacher_models(self,
teacher_models: List[Union[str, BaseModelConfig]],
func_hash: str,
task_type: str):
"""
Add custom teacher models to the function config
First this is added to the teacher_models_override dict, which is used to override the teacher models
Args:
teacher_models: A list of teacher models to use for the function hash
func_hash: The function hash to add the teacher models to
"""
if func_hash not in self.teacher_models_override:
self.teacher_models_override[func_hash] = []
if task_type == FunctionType.EMBEDDABLE:
preconfigured_models = DEFAULT_EMBEDDING_MODELS
elif task_type == FunctionType.SYMBOLIC:
preconfigured_models = DEFAULT_TEACHER_MODELS
for model in teacher_models:
if isinstance(model, str):
if model not in preconfigured_models:
raise Exception(f"Teacher model {model} not supported by default. Please include it in the list in extended config format")
model_config = preconfigured_models[model]
elif isinstance(model, BaseModelConfig):
model_config = model
self.teacher_models_override[func_hash].append(model_config)
def _configure_student_model(self,
student_model: str,
func_hash: str,
task_type: str):
"""
Add a custom student model to the function config
This is added to the student_model_override dict, which is used to override the default student model
Args:
student_model: The student model to use for the function hash
func_hash: The function hash to add the student model to
"""
if task_type == FunctionType.EMBEDDABLE:
logging.info("Embeddable function type does not support student models")
preconfigured_models = DEFAULT_STUDENT_MODELS
if student_model not in preconfigured_models:
raise Exception(f"Student model {student_model} is currently not supported.")
model_config = preconfigured_models[student_model]
self.student_model_override[func_hash] = model_config
def _get_datasets(self):
"""
Get the existing datasets from the data worker
"""
self.dataset_sizes = self.data_worker.load_existing_datasets()
def save_embeddable_align_statements(self,
function_hash: str,
args,
kwargs,
positive_pairs: List[Tuple[List, Dict]],
negative_pairs: List[Tuple[List, Dict]]):
"""
Save the contrastive align statements for the embeddable function.
Do not save if the function hash is in the store data blacklist
Args:
function_hash: A unique hash for the function
args: The arguments of the function
kwargs: The keyword arguments of the function
positive_pairs: A list of other function invocations that should have equivalent embeddings
negative_pairs: A list of other function invocations that should have different embeddings
"""
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
# prepare positive pairs for saving
parsed_positive_pairs = []
for pair in positive_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_positive_pairs.append(parsed_pair)
# prepare negative pairs for saving
parsed_negative_pairs = []
for pair in negative_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_negative_pairs.append(parsed_pair)
# save the contrastive pairs
for pair in parsed_positive_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=True)
for pair in parsed_negative_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=False)
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_datapoint(self, func_hash, example):
"""
Save datapoint to the training data
"""
written_datapoints = self.data_worker.log_symbolic_patch(func_hash, example)
for func_hash, datapoints in written_datapoints.items():
if func_hash in self.dataset_sizes[PATCHES]:
# if the dataset size is -1, it means we haven't read in the dataset size yet
if self.dataset_sizes[PATCHES][func_hash] == -1:
self.dataset_sizes[PATCHES][func_hash] = self._get_dataset_info(PATCHES, func_hash, type="length")
else:
self.dataset_sizes[PATCHES][func_hash] += datapoints
else:
self.dataset_sizes[PATCHES][func_hash] = datapoints
return len(written_datapoints) > 0
def get_symbolic_alignments(self, func_hash, max=20):
"""
Get all symbolic aligns for a function hash
"""
if func_hash not in self.symbolic_align_buffer:
return []
buffer = self.symbolic_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def get_embeddable_alignments(self, func_hash, max=20):
"""
Get all embeddable aligns for a function hash
"""
if func_hash not in self.embeddable_align_buffer:
return []
buffer = self.embeddable_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def _get_examples_from_alignment_buffer(self, buffer, max=20):
"""
Get examples from a buffer
"""
split_buffer = bytes(buffer).split(b"\n")
# convert the byte array of stringified python dicts into dict objects
example_set = set()
for example in split_buffer:
if example == b"":
continue
example_set.add(example)
# easy and straightforward way to get the number of words (not perfect, but it doesn't need to be)
# Proper tokenization could be done later; it might be slower and we don't need 100% accuracy
example_element_limit = EXAMPLE_ELEMENT_LIMIT
examples = []
for example_bytes in split_buffer:
if example_bytes in example_set:
nr_of_elements = approximate_token_count(example_bytes)
example_element_limit -= nr_of_elements
if example_element_limit < 0:
break
example = example_bytes.decode('utf-8')
# json load the example
try:
example = json.loads(example)
except:
example = ast.literal_eval(example)
examples.append(example)
example_set.remove(example_bytes)
return list(examples)[:max]
def load_symbolic_align_statements(self, function_hash):
"""
Load all align statements
First check the data storage blacklist,
if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray
"""
if function_hash in self.store_data_blacklist:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0
self.symbolic_align_buffer[function_hash] = bytearray()
elif function_hash not in self.symbolic_align_buffer:
dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both")
if align_dataset:
self.symbolic_align_buffer[function_hash] = bytearray(align_dataset)
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size
def postprocess_symbolic_datapoint(self, func_hash, function_description, example, repaired=True):
"""
Postprocess the datapoint
First check if the datapoint should be added to the training data
Add the datapoint if it should be added
Then check if the function should be finetuned and execute finetuning if it should
"""
try:
if func_hash not in self.store_data_blacklist:
added = self.save_symbolic_datapoint(func_hash, example)
if added:
self._update_datapoint_config(repaired, func_hash)
except Exception as e:
print(e)
print("Could not add datapoint to training data")
if func_hash not in self.execute_finetune_blacklist:
self.check_for_finetuning(function_description, func_hash)
def load_function_config(self, func_hash, function_description):
"""
Load the config file for a function hash
"""
config, default = self.data_worker.load_function_config(func_hash)
if func_hash in self.student_model_override and config.distilled_model.model_name == "":
config.distilled_model = self.student_model_override[func_hash]
if default and func_hash not in self.check_finetune_blacklist:
finetuned, finetune_config = self._check_for_finetunes(function_description, config.distilled_model)
if finetuned:
config = finetune_config
# update teachers if not default
if func_hash in self.teacher_models_override:
config.teacher_models = self.teacher_models_override[func_hash]
self.function_configs[func_hash] = config
return config
def _check_for_finetunes(self, function_description: FunctionDescription, model_config : BaseModelConfig) -> Tuple[bool, Dict]:
# hash the function_hash into 16 characters (to embed it into the name of OpenAI finetunes, for later retrieval)
logging.info(f"Checking for finetunes for {function_description.name} using {model_config.provider}")
finetune_hash = function_description.__hash__(purpose="finetune") + encode_int(self.environment_id)
# List 10 fine-tuning jobs
finetunes: List[FinetuneJob] = self.api_provider[model_config.provider].list_finetuned(model_config, limit=1000)
# Check if the function_hash is in the fine-tuning jobs
# the finetunes are in chronological order starting from newest
# So this gets the latest finetune
for finetune in finetunes:
# check if the finetune hash is in the fine-tuned model name
if finetune.status == "succeeded" and finetune_hash in finetune.fine_tuned_model.model_name:
try:
config = self._construct_config_from_finetune(finetune_hash, finetune)
# save the config
self.data_worker.update_function_config(function_description.__hash__(), config)
logging.info(f"Found finetuned model for {function_description.name} [{config.distilled_model.model_name}]")
return True, config
except:
logging.info(f"Found finetuned model for {function_description.name} [{finetune.fine_tuned_model.model_name}] but could not load it")
return False, {}
logging.info(f"No finetuned model found for {function_description.name}")
return False, {}
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
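# A worked example of the decoding above (illustrative, not from the original source):
# if the character that follows the finetune hash in the model name decodes to 2, then
# nr_of_training_runs = 2 + 1 = 3 and the datapoint estimate is
# 2 ** (3 - 1) * 200 = 800 trained_on_datapoints.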
def get_models(self, function_description):
"""
Return the current model from the config file
"""
func_hash = function_description.__hash__()
if func_hash in self.function_configs:
func_config = self.function_configs[func_hash]
else:
func_config = self.load_function_config(func_hash, function_description)
return func_config.distilled_model, func_config.teacher_models
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
Then updates running faults depending on whether repaired is True and keeps only the last 100
Then checks the revert condition, i.e if last 10 datapoints are 50% faulty
Finally updates the config file
Args:
repaired (bool): whether the datapoint was fixed by the teacher model/should be added to the training data
"""
try:
if repaired:
self.function_configs[func_hash].current_model_stats["running_faults"].append(1)
else:
self.function_configs[func_hash].current_model_stats["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash].current_model_stats["running_faults"] = \
self.function_configs[func_hash].current_model_stats["running_faults"][-100:]
# check if the last 10 datapoints are 50% faulty, this is the switch condition
if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash].distilled_model.model_name = ""
self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0
self.function_configs[func_hash].current_model_stats["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
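# Illustrative example of the revert condition above: if 6 of the last 10 logged
# datapoints needed repair, running_faults[-10:] sums to 6 and 6 / 10 > 0.5, so the
# distilled model name is cleared and the datapoint and fault counters are reset
# until the function is finetuned again.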
def _update_config_file(self, func_hash):
self.data_worker.update_function_config(func_hash, self.function_configs[func_hash])
def check_for_finetuning(self, function_description, func_hash):
"""
Check for finetuning status
If already finetuning, check for finetuning status
If not finetuning, check for finetuning condition and execute finetuning if condition is met
"""
try:
# check if already finetuning
if "job_id" in self.function_configs[func_hash].current_training_run:
# check for job status
self._check_finetuning_status(func_hash, function_description)
else:
# check for finetuning condition
if self._check_finetuning_condition(func_hash, function_description):
self._execute_finetuning(function_description, func_hash)
except Exception as e:
print(e)
print("Error checking for finetuning")
def _check_finetuning_condition(self, func_hash, function_description):
"""
Check if the finetuning condition is met
Currently finetuning condition is dependent on the number of symbolic datapoints since last finetuning
"""
if func_hash not in self.function_configs:
return False
training_threshold = (2 ** self.function_configs[func_hash].nr_of_training_runs) * 200
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
if patch_dataset_size == -1:
# if we haven't read in the patch dataset size yet, read it in
patch_dataset_size = self._get_dataset_info(PATCHES, func_hash, type="length")
self.dataset_sizes[PATCHES][func_hash] = patch_dataset_size
if func_hash not in self.startup_logging_checker:
logging.info(f"Function {function_description.name} [{align_dataset_size} aligns | {patch_dataset_size} runs] will be finetuned from"\
f" {self.function_configs[func_hash].teacher_models[0].model_name} using {self.function_configs[func_hash].distilled_model.provider} in "\
f"{training_threshold-(patch_dataset_size + align_dataset_size)} runs")
self.startup_logging_checker[func_hash] = True
return (patch_dataset_size + align_dataset_size) > training_threshold
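# Worked example of the threshold above (illustrative): with nr_of_training_runs = 1
# the threshold is 2 ** 1 * 200 = 400, so finetuning triggers once aligns + patches
# exceed 400 datapoints; after the next run the threshold doubles to 800.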
def _execute_finetuning(self, function_description, func_hash):
"""
Execute the finetuning
First create the OpenAI compatible dataset with jsonL file and upload it
Then submit the OpenAI finetuning job
Finally update the config file to reflect the new finetuning job as current
"""
# get function description
function_string = str(function_description.__dict__.__repr__() + "\n")
# get the align dataset
align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, func_hash, type="dataset")
if not align_dataset:
align_dataset = ""
else:
align_dataset = align_dataset.decode('utf-8')
# get the patch dataset
patch_dataset = self._get_dataset_info(PATCHES, func_hash, type="dataset")
if not patch_dataset:
patch_dataset = ""
else:
patch_dataset = patch_dataset.decode('utf-8')
if align_dataset == "" and patch_dataset == "":
return
dataset = align_dataset + patch_dataset
dataset = dataset.replace("\\n", "[SEP_TOKEN]")
dataset = dataset.split("\n")
dataset = [x.replace("[SEP_TOKEN]", "\\n") for x in dataset if x != ""]
# read in the dataset file
dataset = [ast.literal_eval(x) for x in dataset]
#
# create the openai dataset
instruction = "You are given below a function description and input data. The function description of what the function must carry out can be found in the Function section, with input and output type hints. The input data can be found in Input section. Using the function description, apply the function to the Input and return a valid output type, that is acceptable by the output_class_definition and output_class_hint. Return None if you can't apply the function to the input or if the output is optional and the correct output is None.\nINCREDIBLY IMPORTANT: Only output a JSON-compatible string in the correct response format."
finetuning_dataset = [{"messages": [
{
"role": "system",
"content": f"You are a skillful and accurate language model, who applies a described function on input data. Make sure the function is applied accurately and correctly and the outputs follow the output type hints and are valid outputs given the output types."
},
{"role": "user",
"content": f"{instruction}\nFunction: {function_string}---\nInputs:\nArgs: {x['args']}\nKwargs: {x['kwargs']}\nOutput:"},
{"role": "assistant", "content": str(x['output']) if x['output'] is not None else "None"}]}
for x in dataset]
# Create an in-memory text stream
temp_file = io.BytesIO()
# Write data to the stream
for idx, item in enumerate(finetuning_dataset):
temp_file.write(json.dumps(item).encode('utf-8'))
if idx != len(finetuning_dataset) - 1:
temp_file.write("\n".encode('utf-8'))
# Reset the stream position to the beginning
temp_file.seek(0)
# create the finetune hash
finetune_hash = function_description.__hash__(purpose="finetune")
nr_of_training_runs = self.function_configs[func_hash].nr_of_training_runs
finetune_hash += encode_int(self.environment_id)
finetune_hash += encode_int(nr_of_training_runs)
# here we can be sure that the datasets were read in, as that is checked by the finetuning condition
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
total_dataset_size = align_dataset_size + patch_dataset_size
# Use the stream as a file
try:
finetune_provider = self.function_configs[func_hash].distilled_model.provider
logging.info(f"Starting finetuning for {function_description.name} using {finetune_provider} for {self.function_configs[func_hash].distilled_model.base_model_for_sft}")
finetuning_response: FinetuneJob = self.api_provider[finetune_provider].finetune(file=temp_file,
suffix=finetune_hash,
model_config = self.function_configs[func_hash].distilled_model,)
except Exception as e:
logging.info(f"Could not start finetuning for {function_description.name} using {finetune_provider}. Error: {e}")
return
self.function_configs[func_hash].current_training_run = {"job_id": finetuning_response.id,
"trained_on_datapoints": total_dataset_size,
"last_checked": datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")}
# update the config json file
try:
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file to register a finetuning run")
def _check_finetuning_status(self, func_hash, function_description):
"""
Check the status of the current finetuning job
If the job is finished, update the config file to reflect the new model
"""
job_id = self.function_configs[func_hash].current_training_run["job_id"]
last_checked = self.function_configs[func_hash].current_training_run["last_checked"]
# check if last checked was more than 30 mins ago
if (datetime.datetime.now() - datetime.datetime.strptime(last_checked,
"%Y-%m-%d %H:%M:%S")).total_seconds() > 1800:
finetune_provider = self.function_configs[func_hash].distilled_model.provider
response = self.api_provider[finetune_provider].get_finetuned(job_id, model_config = self.function_configs[func_hash].distilled_model)
self.function_configs[func_hash].current_training_run["last_checked"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
if response.status == "succeeded" or response.status == "failed":
self._update_finetune_config(response, func_hash, function_description)
else:
self._update_config_file(func_hash)
def _update_finetune_config(self, response: FinetuneJob, func_hash, function_description):
"""
Update the config file to reflect the new model and switch the current model to the finetuned model
"""
self.function_configs[func_hash].update_with_finetuned_response(response)
logging.info(f"Finetuning for {function_description.name} using {self.function_configs[func_hash].distilled_model.provider} finished with status: {response.status}."\
f" The id of the finetuned model is {response.fine_tuned_model.model_name}")
try:
self._update_config_file(func_hash)
except Exception as e:
logging.info(f"Could not update the function configuration file with the finetuned model for {function_description.name}. Error: {e}")
pass
<fim_middle>self.function_configs = {} | self.function_configs = {} | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
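# For illustration (exact membership depends on the running Python version):
# collections.OrderedDict and collections.defaultdict expose keys()/items() and so
# land in dict_like_types, collections.deque exposes append()/pop() and lands in
# list_like_types, and ABCs with add()/discard() such as collections.abc.MutableSet
# land in set_like_types.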
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
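# Illustrative usage of validate_output (examples not from the original source):
#   Validator().validate_output('{"a": 1}', typing.Dict[str, int]) -> True
#   Validator().validate_output('not valid json', int)             -> False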
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
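# A quick sketch of how check_type behaves (illustrative examples, not part of the
# original source):
#   Validator().check_type([1, 2, 3], typing.List[int])     -> True
#   Validator().check_type({"a": 1}, typing.Dict[str, int]) -> True
#   Validator().check_type("1", int)                        -> False (no coercion)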
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses such as OrderedDict
# the isclass check needs to be done first to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
<fim_suffix>
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if item_type[0] is not Any and not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>return dict(instantiated_dict) | return dict(instantiated_dict) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def load_existing_datasets(self) -> Dict[str, Dict[str, str]]:
log_directory = self.log_directory
dataset_lengths = {
SYMBOLIC_ALIGNMENTS: {},
POSITIVE_EMBEDDABLE_ALIGNMENTS: {},
NEGATIVE_EMBEDDABLE_ALIGNMENTS: {},
PATCHES: {},
}
try:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
# get all the files in the log directory
files = os.listdir(log_directory)
# discard all .json files
files = [x for x in files if ".json" not in x]
except Exception as e:
return dataset_lengths
for file in files:
if ALIGN_FILE_EXTENSION not in file \
and PATCH_FILE_EXTENSION not in file \
and POSITIVE_FILE_EXTENSION not in file \
and NEGATIVE_FILE_EXTENSION not in file:
continue
elif ALIGN_FILE_EXTENSION in file:
dataset_type = SYMBOLIC_ALIGNMENTS
elif POSITIVE_FILE_EXTENSION in file:
dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS
elif NEGATIVE_FILE_EXTENSION in file:
dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS
else:
dataset_type = PATCHES
func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "")
dataset_lengths[dataset_type][func_hash] = -1
return dataset_lengths
# tanuki_py/src/tanuki/models/function_config.py
def load_from_dict(self, json_dict):
"""
Load the function config from a dict
Args:
json_dict: The dict to load the function config from
Returns:
The function config
"""
self.distilled_model = config_factory.create_config(json_dict["distilled_model"], DISTILLED_MODEL)
self.current_model_stats = json_dict["current_model_stats"]
self.last_training_run = json_dict["last_training_run"]
self.current_training_run = json_dict["current_training_run"]
self.nr_of_training_runs = json_dict["nr_of_training_runs"]
if "teacher_models" in json_dict and len(json_dict["teacher_models"]) > 0:
self.teacher_models = [config_factory.create_config(teacher_model, TEACHER_MODEL) for teacher_model in json_dict["teacher_models"]]
return self
# tanuki_py/src/tanuki/models/function_config.py
def to_dict(self):
"""
Convert the function config to a dict
Returns:
The dict
"""
try:
config_dictionary = self.model_dump()
except AttributeError as e:
config_dictionary = self.dict()
return config_dictionary
"""
import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
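# Illustrative note on the dedup above: duplicates are detected purely via the Bloom
# filter keyed on func_hash + '_' + str(example.__dict__), so with the defaults shown
# in the commented constants above (FALSE_POSITIVE_RATE = 0.01) roughly 1% of
# genuinely new alignments may be skipped as presumed duplicates.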
def log_symbolic_patch(self, func_hash, example):
"""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
"""
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
try:
self.ensure_persistence_location_exists()
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray()
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB
written_datapoints = {}
try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter()
except Exception as e:
pass
return written_datapoints
return {}
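# Worked example of the buffering policy above (illustrative): a new patch file starts
# with flush_limit = 1, so its first datapoint is written immediately; after each flush
# the per-file threshold doubles (1 -> 2 -> 4 -> ...), capped at 4096 bytes, and a
# global flush is also forced after write_limit (1000) logged patch calls.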
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the message and log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
<fim_suffix>
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle>config_path = f"{log_file_path}.json" | config_path = f"{log_file_path}.json" | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
<fim_suffix>
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
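# For illustration: for typing.List[int], get_origin(...) is list and get_args(...) is
# (int,), so the annotation branch above returns (list, (int,)). For a concrete
# subclass such as `class IntList(typing.List[int])` (a hypothetical example), the
# arguments are instead recovered from __orig_bases__.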
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict-like types such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>key_type, value_type = args | key_type, value_type = args | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict-like types such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
<fim_suffix>
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>item_type = get_args(target_type)[0] if get_args(target_type) else Any | item_type = get_args(target_type)[0] if get_args(target_type) else Any | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones withouyt default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict-like types such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
<fim_suffix>
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>item_type = item_types[0] if item_types else Any | item_type = item_types[0] if item_types else Any | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
# tanuki_py/src/tanuki/__init__.py
def mock_func(*args, **kwargs):
hashed_description = description.__hash__()
function_type, func = Register.get(func_name)
# If we are aligning a function that returns an embedding,
# we need to ensure both sides of the equality are future embeddings,
# as it is nonsensical to declare that an embedding should 'be' an object or a string, etc.
if function_type == FunctionType.EMBEDDABLE:
key = get_key(args, kwargs)
mocked_embedding = mock_behaviors.get(key, None)
# Find positive examples by matching the mocked embedding with identical embeddings in the values
# of the mock_behaviors dictionary
mock_positives_list = []
for k, v in mock_behaviors.items():
if v == mocked_embedding and k != key:
mock_positives_list.append(k)
equivalent_mocks = mock_positives_list
negative_mocks = list(mock_negatives.values())
function_modeler.save_embeddable_align_statements(hashed_description,
args,
kwargs,
equivalent_mocks,
negative_mocks)
return mocked_embedding
else:
# If we are aligning a function that returns an object
if not instance:
result = func(*args, **kwargs)
else:
result = func(instance, *args, **kwargs)
# Extract attributes from the result
attributes = extract_attributes(result)
for attr_name, attr_value in attributes.items():
# If the attribute is a list, get its length
if isinstance(attr_value, list):
attributes[attr_name] = len(attr_value)
key = get_key(args, kwargs)
mocked_behaviour = mock_behaviors.get(key, None)
function_modeler.save_symbolic_align_statements(hashed_description, args, kwargs,
mocked_behaviour)
return mocked_behaviour
# tanuki_py/src/tanuki/__init__.py
def create_mock_func(instance: Optional,
func_name: str,
description: FunctionDescription):
def mock_func(*args, **kwargs):
hashed_description = description.__hash__()
function_type, func = Register.get(func_name)
# If we are aligning a function that returns an embedding,
# we need to ensure both sides of the equality are future embeddings,
# as it is nonsensical to declare that an embedding should 'be' an object or a string, etc.
if function_type == FunctionType.EMBEDDABLE:
key = get_key(args, kwargs)
mocked_embedding = mock_behaviors.get(key, None)
# Find positive examples by matching the mocked embedding with identical embeddings in the values
# of the mock_behaviors dictionary
mock_positives_list = []
for k, v in mock_behaviors.items():
if v == mocked_embedding and k != key:
mock_positives_list.append(k)
equivalent_mocks = mock_positives_list
negative_mocks = list(mock_negatives.values())
function_modeler.save_embeddable_align_statements(hashed_description,
args,
kwargs,
equivalent_mocks,
negative_mocks)
return mocked_embedding
else:
# If we are aligning a function that returns an object
if not instance:
result = func(*args, **kwargs)
else:
result = func(instance, *args, **kwargs)
# Extract attributes from the result
attributes = extract_attributes(result)
for attr_name, attr_value in attributes.items():
# If the attribute is a list, get its length
if isinstance(attr_value, list):
attributes[attr_name] = len(attr_value)
key = get_key(args, kwargs)
mocked_behaviour = mock_behaviors.get(key, None)
function_modeler.save_symbolic_align_statements(hashed_description, args, kwargs,
mocked_behaviour)
return mocked_behaviour
return mock_func
"""
import os
from enum import Enum
from typing import Literal, Union, Optional, Dict
from appdirs import user_data_dir
from tanuki.constants import *
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.persistence.filter.filesystem_bloom import BloomFilterFileSystemDriver
from tanuki.trackers.abc_buffered_logger import ABCBufferedLogger
class FilesystemBufferedLogger(ABCBufferedLogger):
"""
A class that handles the reading and writing of patch invocations and align statements.
It includes the logic for a bloom filter, to ensure that we only store unique invocations.
"""
def __init__(self, name, level=15):
self.log_directory = self._get_log_directory()
super().__init__(name, level)
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. Typically this will be a file system provider.
:return: A persistence provider
"""
return BloomFilterFileSystemDriver(log_directory=self.log_directory)
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
def ensure_persistence_location_exists(self) -> None:
"""
Ensure that the location on the filesystem we will be writing to actually exists. If not, create it.
"""
log_directory = self.log_directory
# Create the folder if it doesn't exist
if not os.path.exists(log_directory):
os.makedirs(log_directory)
def does_object_exist(self, path: str) -> bool:
"""
Check to see if a path exists on the filesystem.
:param path:
:return:
"""
return os.path.exists(path)
def _get_log_directory(self) -> str:
"""
Find a location on the filesystem to write our logs to.
:return:
"""
filename = "functions"
# If explicitly defined
env_dir = os.getenv(ENVVAR)
if env_dir and os.path.isdir(env_dir):
return os.path.join(env_dir, filename)
# If installed as a library
library_dir = os.path.join(user_data_dir(LIB_NAME), filename)
if os.path.isdir(library_dir) or not os.path.exists(library_dir):
return library_dir
<fim_suffix>
current_dir = os.getcwd()
while current_dir != os.path.dirname(current_dir):
if ".git" in os.listdir(current_dir):
return os.path.join(current_dir, filename)
current_dir = os.path.dirname(current_dir)
return os.path.join(os.getcwd(), filename)
def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]:
"""
Get the size of the dataset for a function hash
"""
log_directory = self._get_log_directory()
dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION,
"positive": POSITIVE_FILE_EXTENSION,
"negative": NEGATIVE_FILE_EXTENSION,
"patches": PATCH_FILE_EXTENSION}
log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type])
if not os.path.exists(log_file_path):
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
try:
with open(log_file_path, "rb") as f:
dataset = f.read()
dataset_string = repr(dataset)
dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n")
if return_type == "both":
return dataset_length, dataset
elif return_type == "dataset":
return dataset
elif return_type == "length":
return dataset_length
except Exception as e:
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
def load_existing_datasets(self) -> Dict[str, Dict[str, str]]:
log_directory = self.log_directory
dataset_lengths = {
SYMBOLIC_ALIGNMENTS: {},
POSITIVE_EMBEDDABLE_ALIGNMENTS: {},
NEGATIVE_EMBEDDABLE_ALIGNMENTS: {},
PATCHES: {},
}
try:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
# get all the files in the log directory
files = os.listdir(log_directory)
# discard all .json files
files = [x for x in files if ".json" not in x]
except Exception as e:
return dataset_lengths
for file in files:
if ALIGN_FILE_EXTENSION not in file \
and PATCH_FILE_EXTENSION not in file \
and POSITIVE_FILE_EXTENSION not in file \
and NEGATIVE_FILE_EXTENSION not in file:
continue
elif ALIGN_FILE_EXTENSION in file:
dataset_type = SYMBOLIC_ALIGNMENTS
elif POSITIVE_FILE_EXTENSION in file:
dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS
elif NEGATIVE_FILE_EXTENSION in file:
dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS
else:
dataset_type = PATCHES
func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "")
dataset_lengths[dataset_type][func_hash] = -1
return dataset_lengths
def write(self, path: str, data: str, mode: Literal["w", "a", "a+b"] = "w") -> None:
"""
Write data to a file
"""
with open(path, mode) as f:
f.write(data)
def read(self, path: str) -> str:
"""
Read data from a file
"""
with open(path, "r") as f:
return f.read()
def get_hash_from_path(self, path) -> str:
"""
Given a path with a hash, return only the hash
:param path: The path to the file
:return: The hash
"""
return path.replace(PATCH_FILE_EXTENSION, ""). \
replace(self.log_directory, ""). \
lstrip("/"). \
lstrip("\\")
<fim_middle># If installed in a project that contains a git repo - place it in the same folder as the git repo | # If installed in a project that contains a git repo - place it in the same folder as the git repo | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are of the correct type
# this is an additional check, because the loop above only validates the required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
<fim_suffix>
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle># try to instantiate datetime | # try to instantiate datetime | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
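As a hedged usage sketch of the Validator defined in the row above: validate a deserialized JSON payload against a dataclass annotation, then instantiate it. The Ticket dataclass and the payload are hypothetical; the import path mirrors how language_models/language_model_manager.py imports Validator.
import json
from dataclasses import dataclass
from typing import List

from tanuki.validator import Validator

@dataclass
class Ticket:
    title: str
    tags: List[str]

validator = Validator()
parsed = json.loads('{"title": "Fix login bug", "tags": ["auth", "bug"]}')

# check_type walks the dataclass branch shown above; instantiate rebuilds the object.
if validator.check_type(parsed, Ticket):
    ticket = validator.instantiate(parsed, Ticket)
    print(ticket)  # Ticket(title='Fix login bug', tags=['auth', 'bug'])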
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def load_existing_datasets(self) -> Dict[str, Dict[str, str]]:
log_directory = self.log_directory
dataset_lengths = {
SYMBOLIC_ALIGNMENTS: {},
POSITIVE_EMBEDDABLE_ALIGNMENTS: {},
NEGATIVE_EMBEDDABLE_ALIGNMENTS: {},
PATCHES: {},
}
try:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
# get all the files in the log directory
files = os.listdir(log_directory)
# discard all .json files
files = [x for x in files if ".json" not in x]
except Exception as e:
return dataset_lengths
for file in files:
if ALIGN_FILE_EXTENSION not in file \
and PATCH_FILE_EXTENSION not in file \
and POSITIVE_FILE_EXTENSION not in file \
and NEGATIVE_FILE_EXTENSION not in file:
continue
elif ALIGN_FILE_EXTENSION in file:
dataset_type = SYMBOLIC_ALIGNMENTS
elif POSITIVE_FILE_EXTENSION in file:
dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS
elif NEGATIVE_FILE_EXTENSION in file:
dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS
else:
dataset_type = PATCHES
func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "").replace(POSITIVE_FILE_EXTENSION, "").replace(NEGATIVE_FILE_EXTENSION, "")
dataset_lengths[dataset_type][func_hash] = -1
return dataset_lengths
# tanuki_py/src/tanuki/models/function_config.py
def load_from_dict(self, json_dict):
"""
Load the function config from a dict
Args:
json_dict: The dict to load the function config from
Returns:
The function config
"""
self.distilled_model = config_factory.create_config(json_dict["distilled_model"], DISTILLED_MODEL)
self.current_model_stats = json_dict["current_model_stats"]
self.last_training_run = json_dict["last_training_run"]
self.current_training_run = json_dict["current_training_run"]
self.nr_of_training_runs = json_dict["nr_of_training_runs"]
if "teacher_models" in json_dict and len(json_dict["teacher_models"]) > 0:
self.teacher_models = [config_factory.create_config(teacher_model, TEACHER_MODEL) for teacher_model in json_dict["teacher_models"]]
return self
# tanuki_py/src/tanuki/models/function_config.py
def to_dict(self):
"""
Convert the function config to a dict
Returns:
The dict
"""
try:
config_dictionary = self.model_dump()
except AttributeError as e:
config_dictionary = self.dict()
return config_dictionary
"""
import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
def log_symbolic_patch(self, func_hash, example):
"""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
"""
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
try:
self.ensure_persistence_location_exists()
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray()
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB
written_datapoints = {}
try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter()
except Exception as e:
pass
return written_datapoints
return {}
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the function hash and the log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
<fim_suffix>
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle># remove teacher_models from the config | # remove teacher_models from the config | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
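log_symbolic_patch in the row above buffers datapoints in memory per log file and only writes once the buffer reaches min(flush_limit, 4096) bytes, doubling the per-file limit after each flush. The standalone sketch below illustrates that buffering strategy; BufferedAppender is a hypothetical class, not the tanuki logger.
class BufferedAppender:
    def __init__(self):
        self.buffers = {}      # path -> bytearray of pending records
        self.flush_limit = {}  # path -> current flush threshold in bytes

    def append(self, path: str, record: bytes) -> None:
        buffer = self.buffers.setdefault(path, bytearray())
        limit = self.flush_limit.setdefault(path, 1)
        buffer.extend(record)
        if len(buffer) >= min(limit, 4096):
            # Flush the whole buffer in one append, then double the threshold so
            # frequently written files are flushed in progressively larger batches.
            with open(path, "a+b") as f:
                f.write(buffer)
            buffer.clear()
            self.flush_limit[path] = 2 * limit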
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/utils.py
def get_model(content, logger, func_hash):
"""
Get the model from the content and the logger.
Decide on the model depending on the length of the content. If it is finetunable, return (model, True); otherwise return (model, False)
Args:
content (str): the content to be aligned
logger (buffered logger): the logger
func_hash (str): the function hash
Returns:
model (str): the model to be used
finetunable (bool): whether the model is finetunable
"""
num_tokens = approximate_token_count(content)
finetune_limit = logger.finetune_token_limit
finetune_model, teacher_models = logger.get_models(func_hash)
if num_tokens < finetune_limit:
return finetune_model, True
else:
# this is just for backwards compatibility currently
if len(teacher_models) == 0 or isinstance(teacher_models[0], str):
teacher_models = [("gpt-4", 7000),("gpt-4-32k", 31000)]
for model, token_limit in teacher_models:
if num_tokens < token_limit:
return model, False
raise ValueError("The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
# tanuki_py/src/tanuki/language_models/embedding_model_manager.py
def get_embedding_case(self, args, function_description: FunctionDescription, kwargs, examples=None):
# example_input = f"Examples:{examples}\n" if examples else ""
content = f"Name: {function_description.name}\nArgs: {args}\nKwargs: {kwargs}"
function_hash = function_description.__hash__()
if function_hash in self.function_modeler.teacher_models_override: # check for overrides
model = self.function_modeler.teacher_models_override[function_hash][0] # take currently the first model
else:
model = DEFAULT_EMBEDDING_MODELS[DEFAULT_EMBEDDING_MODEL_NAME]
# loggings
if function_hash not in self.initialized_functions:
logging.info(f"Generating function embeddings for {function_description.name} with {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
elif self.initialized_functions[function_hash] != model.model_name:
logging.info(f"Switching embeddings generation for {function_description.name} from {self.initialized_functions[function_hash]} to {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
return content, model
# tanuki_py/src/tanuki/function_modeler.py
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
Then updates running faults depending if priority is True or not and takes last 100
Then checks the revert condition, i.e if last 10 datapoints are 50% faulty
Finally updates the config file
Args:
priority (bool): whether the datapoint was fixed by the teacher model/should be added to the training data
"""
try:
if repaired:
self.function_configs[func_hash].current_model_stats["running_faults"].append(1)
else:
self.function_configs[func_hash].current_model_stats["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash].current_model_stats["running_faults"] = \
self.function_configs[func_hash].current_model_stats["running_faults"][-100:]
# check if the last 10 datapoints are 50% faulty, this is the switch condition
if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash].distilled_model.model_name = ""
self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0
self.function_configs[func_hash].current_model_stats["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
"""
import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.language_model_output import LanguageModelOutput
from tanuki.utils import approximate_token_count
from tanuki.validator import Validator
from tanuki.models.api_manager import APIManager
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
import logging
class LanguageModelManager(object):
"""
The LanguageModelManager is responsible for managing the language models and their outputs operationally,
this includes:
- Generating outputs from the language models
- Repairing outputs from the language models
- Saving outputs from the language models
- Finetuning the language models from the saved outputs
"""
def __init__(self,
function_modeler: FunctionModeler,
api_provider: APIManager,
generation_token_limit=512,) -> None:
self.api_provider = api_provider
self.function_modeler = function_modeler
self.default_generation_length = generation_token_limit
self.initialized_functions = {}
self.token_counts = {}
def __call__(self,
args,
function_description: FunctionDescription,
kwargs,
validator: Validator,
generation_parameters: dict) -> Any:
# add the generation length if not there
if "max_new_tokens" not in generation_parameters:
generation_parameters["max_new_tokens"] = self.default_generation_length
output = self.generate(args, kwargs, function_description, generation_parameters)
# start parsing the object, very hacky way for the time being
choice_parsed = self._parse_choice(output)
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
choice, choice_parsed, successful_repair = self.repair_output(args,
kwargs,
function_description,
output.generated_response,
validator,
generation_parameters)
if not successful_repair:
raise TypeError(
f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
output.generated_response = choice
output.distilled_model = False
datapoint = FunctionExample(args, kwargs, output.generated_response)
if output.suitable_for_finetuning and not output.distilled_model:
self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
datapoint, repaired=not valid)
instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
return instantiated
def _parse_choice(self, output):
try:
# json load
choice_parsed = json.loads(output.generated_response)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(output.generated_response)
except:
choice_parsed = output.generated_response
return choice_parsed
def generate(self, args, kwargs, function_description, llm_parameters={}):
"""
The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset
"""
func_hash = function_description.__hash__()
prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs,
function_description,
llm_parameters,
func_hash)
# loggings
current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements
if current_function_setup:
generator_model = current_function_setup["model"]
if is_distilled_model:
logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model == "":
logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. Generating function outputs with {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model != model.model_name:
logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
choice = self._synthesise_answer(prompt, model, llm_parameters)
output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model)
return output
def _synthesise_answer(self, prompt, model, llm_parameters):
"""
Synthesise an answer given the prompt, model, model_type and llm_parameters
Args:
prompt (str): The prompt to send to the model
model (BaseModelConfig): The model to use for generation
llm_parameters (dict): The parameters to use for generation
return:
choice (str): The generated response
"""
system_message = model.system_message
return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters)
def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash):
"""
Get the generation case with the correct prompt and model
First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune
If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count
"""
f = str(function_description.__dict__.__repr__())
distilled_model, teacher_models = self.function_modeler.get_models(function_description)
is_distilled_model = distilled_model.model_name != ""
suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f,
distilled_model)
if func_hash not in self.initialized_functions:
<fim_suffix>
self.initialized_functions[func_hash] = {"model": "", "examples": []}
# no examples needed, using a finetuned model. Don't save to the finetune dataset
if is_distilled_model and suitable_for_distillation:
prompt = self.construct_prompt(f, args, kwargs, [], distilled_model)
return prompt, distilled_model, suitable_for_distillation, True
else:
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16)
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
# update the examples in the initialized_functions dict
self.initialized_functions[func_hash]["examples"] = examples
examples_token_count = sum([approximate_token_count(example) for example in examples])
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(teacher_models,
examples_token_count + input_prompt_token_count + generation_tokens,
len(examples))
if model:
examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in
aligns]
prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model)
return prompt, model, suitable_for_distillation, False
else:
raise ValueError(
"The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig):
"""
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
"""
# check if finetunable
finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
input_prompt_token_count = approximate_token_count(finetuning_prompt)
if distilled_model.system_message_token_count < 0:
distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message)
if distilled_model.instruction_token_count < 0:
distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions)
suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length
return suitable_for_finetune, input_prompt_token_count
def construct_prompt(self, f, args, kwargs, examples, model):
"""
Construct a prompt given the model, function description, args, kwargs and examples
Args:
model (BaseModelConfig): The model to use for generation
f (str): The function description
args (tuple): The args of the function
kwargs (tuple): The kwargs of the function
examples (list): The examples of the function
Returns:
content (str): The prompt to send to the model
"""
if examples:
final_examples = "\n".join(
[f"{align}" for align in
examples])
example_input = f"Examples:{final_examples}\n"
else:
example_input = ""
instruction_prompt = model.instructions
content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
return content
def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters):
"""
Repair the output given the input, function description, failed outputs list, examples and models
"""
# get the token counts
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
examples_token_count = sum([approximate_token_count(example) for example in examples])
failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list])
input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:")
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(models,
examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count,
len(examples))
if model:
prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model)
logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}")
choice = self._synthesise_answer(prompt, model, llm_parameters)
return choice
else:
return None
def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model):
"""
Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples
"""
if examples:
final_examples = "\n".join(
[f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in
examples])
successful_examples = f"Examples:{final_examples}\n"
else:
successful_examples = ""
failed_examples = ""
for failed_output in failed_outputs_list:
failed_examples += f"Output: {failed_output[0]}\nError: {failed_output[1]}\n\n"
end_token_addition = ""
if model.parsing_helper_tokens["end_token"]:
end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output."
prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:"
return prompt
def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0):
"""
Choose a model from the models given the token count and number of examples
Args:
models (list): The models to choose from
input_token_count (int): The token count of the input
nr_of_examples (int): The number of examples
Returns:
model (BaseModelConfig): The chosen model
"""
for model in models:
# check if input token count is less than the context length
# If the model config has custom messages, then use those, otherwise use the default ones
if model.system_message_token_count < 0:
model.system_message_token_count = approximate_token_count(model.system_message)
if model.instruction_token_count < 0:
model.instruction_token_count = approximate_token_count(model.instructions)
if model.parsing_helper_tokens["start_token"]:
input_token_count += 2*nr_of_examples
if model.parsing_helper_tokens["end_token"]:
input_token_count += 2*nr_of_examples
total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count
if total_token_count < model.context_length:
return model
return None
def repair_output(self,
args: tuple,
kwargs: dict,
function_description: FunctionDescription,
choice,
validator: Validator,
generation_parameters: dict) -> tuple:
"""
Repair an output that failed type validation by generating a new output using the teacher model and the error
Args:
args (tuple): The args of the function
kwargs (dict): The kwargs of the function
function_description (FunctionDescription): The function description
choice: The output that failed type validation, type is arbitrary
validator (Validator): The validator object
Returns:
choice (str): The choice that was generated by the language model
choice_parsed: The parsed choice, type is arbitrary
valid (bool): Whether the repaired output was valid
"""
# get the teacher models
teacher_models = self.function_modeler.get_models(function_description)[1]
valid = False
retry_index = 5
f = str(function_description.__dict__.__repr__() + "\n")
error = f"Output type was not valid. Expected an valid object of type {function_description.output_type_hint}, got '{choice}'"
# instantiate the failed outputs list
failed_outputs_list = [(choice, error)]
while retry_index > 0 and not valid:
# get the alignments
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5)
# Generate the repaired LLM output
choice = self.repair_generate(args,
kwargs,
f,
failed_outputs_list,
aligns,
teacher_models,
generation_parameters)
if not choice:
# if no choice then the input was too long for the model
# no specific error but the retry index goes down
retry_index -= 1
continue
# start parsing the object
try:
# json load
choice_parsed = json.loads(choice)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(choice)
except:
choice_parsed = choice
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
# if it's not valid, add it to the failed outputs list
error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'"
failed_outputs_list.append((choice, error))
retry_index -= 1
if valid:
logging.info(f"Successfully repaired output.")
return choice, choice_parsed, valid
<fim_middle># initialise the initialized_functions dict | # initialise the initialized_functions dict | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
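choose_model_from_tokens in the row above returns the first model whose context window can hold the prompt, the instructions, and the generation budget. The sketch below illustrates the same selection rule with hypothetical model names and context lengths; it is not the tanuki implementation.
from typing import List, Optional, Tuple

def pick_model(candidates: List[Tuple[str, int]],
               prompt_tokens: int,
               generation_tokens: int) -> Optional[str]:
    for name, context_length in candidates:
        # Take the first (smallest) model whose context window fits the request.
        if prompt_tokens + generation_tokens < context_length:
            return name
    return None  # every candidate would overflow; the caller should raise or shorten the input

# Example: a 9000-token prompt overflows the 8k model, so the 32k variant is chosen.
print(pick_model([("gpt-4", 8192), ("gpt-4-32k", 32768)], 9000, 512))  # -> gpt-4-32k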
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/utils.py
def get_model(content, logger, func_hash):
"""
Get the model from the content and the logger.
Decide on the model depending on the length of the content. If it is finetunable, return (model, True); otherwise return (model, False)
Args:
content (str): the content to be aligned
logger (buffered logger): the logger
func_hash (str): the function hash
Returns:
model (str): the model to be used
finetunable (bool): whether the model is finetunable
"""
num_tokens = approximate_token_count(content)
finetune_limit = logger.finetune_token_limit
finetune_model, teacher_models = logger.get_models(func_hash)
if num_tokens < finetune_limit:
return finetune_model, True
else:
# this is just for backwards compatibility currently
if len(teacher_models) == 0 or isinstance(teacher_models[0], str):
teacher_models = [("gpt-4", 7000),("gpt-4-32k", 31000)]
for model, token_limit in teacher_models:
if num_tokens < token_limit:
return model, False
raise ValueError("The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
# tanuki_py/src/tanuki/language_models/embedding_model_manager.py
def get_embedding_case(self, args, function_description: FunctionDescription, kwargs, examples=None):
# example_input = f"Examples:{examples}\n" if examples else ""
content = f"Name: {function_description.name}\nArgs: {args}\nKwargs: {kwargs}"
function_hash = function_description.__hash__()
if function_hash in self.function_modeler.teacher_models_override: # check for overrides
model = self.function_modeler.teacher_models_override[function_hash][0] # take currently the first model
else:
model = DEFAULT_EMBEDDING_MODELS[DEFAULT_EMBEDDING_MODEL_NAME]
# loggings
if function_hash not in self.initialized_functions:
logging.info(f"Generating function embeddings for {function_description.name} with {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
elif self.initialized_functions[function_hash] != model.model_name:
logging.info(f"Switching embeddings generation for {function_description.name} from {self.initialized_functions[function_hash]} to {model.model_name}")
self.initialized_functions[function_hash] = model.model_name
return content, model
# tanuki_py/src/tanuki/function_modeler.py
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
Then updates running faults depending if priority is True or not and takes last 100
Then checks the revert condition, i.e if last 10 datapoints are 50% faulty
Finally updates the config file
Args:
priority (bool): whether the datapoint was fixed by the teacher model/should be added to the training data
"""
try:
if repaired:
self.function_configs[func_hash].current_model_stats["running_faults"].append(1)
else:
self.function_configs[func_hash].current_model_stats["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash].current_model_stats["running_faults"] = \
self.function_configs[func_hash].current_model_stats["running_faults"][-100:]
# check if the last 10 datapoints are 50% faulty, this is the switch condition
if sum(self.function_configs[func_hash].current_model_stats["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash].distilled_model.model_name = ""
self.function_configs[func_hash].current_model_stats["trained_on_datapoints"] = 0
self.function_configs[func_hash].current_model_stats["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
"""
import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.language_model_output import LanguageModelOutput
from tanuki.utils import approximate_token_count
from tanuki.validator import Validator
from tanuki.models.api_manager import APIManager
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
import logging
class LanguageModelManager(object):
"""
The LanguageModelManager is responsible for managing the language models and their outputs operationally,
this includes:
- Generating outputs from the language models
- Repairing outputs from the language models
- Saving outputs from the language models
- Finetuning the language models from the saved outputs
"""
def __init__(self,
function_modeler: FunctionModeler,
api_provider: APIManager,
generation_token_limit=512,) -> None:
self.api_provider = api_provider
self.function_modeler = function_modeler
self.default_generation_length = generation_token_limit
self.initialized_functions = {}
self.token_counts = {}
def __call__(self,
args,
function_description: FunctionDescription,
kwargs,
validator: Validator,
generation_parameters: dict) -> Any:
# add the generation length if not there
if "max_new_tokens" not in generation_parameters:
generation_parameters["max_new_tokens"] = self.default_generation_length
output = self.generate(args, kwargs, function_description, generation_parameters)
# start parsing the object, very hacky way for the time being
choice_parsed = self._parse_choice(output)
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
choice, choice_parsed, successful_repair = self.repair_output(args,
kwargs,
function_description,
output.generated_response,
validator,
generation_parameters)
if not successful_repair:
raise TypeError(
f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
output.generated_response = choice
output.distilled_model = False
datapoint = FunctionExample(args, kwargs, output.generated_response)
if output.suitable_for_finetuning and not output.distilled_model:
self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
datapoint, repaired=not valid)
instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
return instantiated
def _parse_choice(self, output):
try:
# json load
choice_parsed = json.loads(output.generated_response)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(output.generated_response)
except:
choice_parsed = output.generated_response
return choice_parsed
def generate(self, args, kwargs, function_description, llm_parameters={}):
"""
        The main generation function: given the args, kwargs, function description and model type, generate a response and check whether the datapoint can be saved to the finetune dataset
"""
func_hash = function_description.__hash__()
prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs,
function_description,
llm_parameters,
func_hash)
        # logging
current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements
if current_function_setup:
generator_model = current_function_setup["model"]
if is_distilled_model:
logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model == "":
logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. Generating function outputs with {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model != model.model_name:
logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
choice = self._synthesise_answer(prompt, model, llm_parameters)
output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model)
return output
def _synthesise_answer(self, prompt, model, llm_parameters):
"""
Synthesise an answer given the prompt, model, model_type and llm_parameters
Args:
prompt (str): The prompt to send to the model
model (BaseModelConfig): The model to use for generation
llm_parameters (dict): The parameters to use for generation
return:
choice (str): The generated response
"""
system_message = model.system_message
return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters)
def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash):
"""
Get the generation case with the correct prompt and model
First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune
If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count
"""
f = str(function_description.__dict__.__repr__())
distilled_model, teacher_models = self.function_modeler.get_models(function_description)
is_distilled_model = distilled_model.model_name != ""
suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f,
distilled_model)
if func_hash not in self.initialized_functions:
# initialise the initialized_functions dict
self.initialized_functions[func_hash] = {"model": "", "examples": []}
        # no examples needed, using a finetuned model. Don't save to the finetune dataset
if is_distilled_model and suitable_for_distillation:
prompt = self.construct_prompt(f, args, kwargs, [], distilled_model)
return prompt, distilled_model, suitable_for_distillation, True
else:
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16)
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
<fim_suffix>
self.initialized_functions[func_hash]["examples"] = examples
examples_token_count = sum([approximate_token_count(example) for example in examples])
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(teacher_models,
examples_token_count + input_prompt_token_count + generation_tokens,
len(examples))
if model:
examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in
aligns]
prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model)
return prompt, model, suitable_for_distillation, False
else:
raise ValueError(
"The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig):
"""
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
"""
# check if finetunable
finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
input_prompt_token_count = approximate_token_count(finetuning_prompt)
if distilled_model.system_message_token_count < 0:
distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message)
if distilled_model.instruction_token_count < 0:
distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions)
suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length
return suitable_for_finetune, input_prompt_token_count
def construct_prompt(self, f, args, kwargs, examples, model):
"""
Construct a prompt given the model, function description, args, kwargs and examples
Args:
model (BaseModelConfig): The model to use for generation
f (str): The function description
args (tuple): The args of the function
kwargs (tuple): The kwargs of the function
examples (list): The examples of the function
Returns:
content (str): The prompt to send to the model
"""
if examples:
final_examples = "\n".join(
[f"{align}" for align in
examples])
example_input = f"Examples:{final_examples}\n"
else:
example_input = ""
instruction_prompt = model.instructions
content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
return content
def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters):
"""
Repair the output given the input, function description, failed outputs list, examples and models
"""
# get the token counts
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
examples_token_count = sum([approximate_token_count(example) for example in examples])
failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list])
input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:")
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(models,
examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count,
len(examples))
if model:
prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model)
logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}")
choice = self._synthesise_answer(prompt, model, llm_parameters)
return choice
else:
return None
def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model):
"""
Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples
"""
if examples:
final_examples = "\n".join(
[f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in
examples])
successful_examples = f"Examples:{final_examples}\n"
else:
successful_examples = ""
failed_examples = ""
for failed_output in failed_outputs_list:
failed_examples += f"Output: {failed_output[0]}\nError: {failed_output[1]}\n\n"
end_token_addition = ""
if model.parsing_helper_tokens["end_token"]:
end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output."
prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:"
return prompt
def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0):
"""
Choose a model from the models given the token count and number of examples
Args:
models (list): The models to choose from
input_token_count (int): The token count of the input
nr_of_examples (int): The number of examples
Returns:
model (BaseModelConfig): The chosen model
"""
for model in models:
# check if input token count is less than the context length
# If the model config has custom messages, then use those, otherwise use the default ones
if model.system_message_token_count < 0:
model.system_message_token_count = approximate_token_count(model.system_message)
if model.instruction_token_count < 0:
model.instruction_token_count = approximate_token_count(model.instructions)
if model.parsing_helper_tokens["start_token"]:
input_token_count += 2*nr_of_examples
if model.parsing_helper_tokens["end_token"]:
input_token_count += 2*nr_of_examples
total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count
if total_token_count < model.context_length:
return model
return None
def repair_output(self,
args: tuple,
kwargs: dict,
function_description: FunctionDescription,
choice,
validator: Validator,
generation_parameters: dict) -> tuple:
"""
        Repair an output that failed type validation by generating a new output using the teacher model and the error
Args:
args (tuple): The args of the function
kwargs (dict): The kwargs of the function
function_description (FunctionDescription): The function description
choice: The output that failed type validation, type is arbitrary
validator (Validator): The validator object
Returns:
choice (str): The choice that was generated by the language model
choice_parsed: The parsed choice, type is arbitrary
            valid (bool): Whether the repaired output was valid
"""
# get the teacher models
teacher_models = self.function_modeler.get_models(function_description)[1]
valid = False
retry_index = 5
f = str(function_description.__dict__.__repr__() + "\n")
error = f"Output type was not valid. Expected an valid object of type {function_description.output_type_hint}, got '{choice}'"
# instantiate the failed outputs list
failed_outputs_list = [(choice, error)]
while retry_index > 0 and not valid:
# get the alignments
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5)
            # Generate the repaired LLM output
choice = self.repair_generate(args,
kwargs,
f,
failed_outputs_list,
aligns,
teacher_models,
generation_parameters)
if not choice:
# if no choice then the input was too long for the model
# no specific error but the retry index goes down
retry_index -= 1
continue
# start parsing the object
try:
# json load
choice_parsed = json.loads(choice)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(choice)
except:
choice_parsed = choice
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
# if it's not valid, add it to the failed outputs list
error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'"
failed_outputs_list.append((choice, error))
retry_index -= 1
if valid:
logging.info(f"Successfully repaired output.")
return choice, choice_parsed, valid
<fim_middle># update the examples in the initialized_functions dict | # update the examples in the initialized_functions dict | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
<fim_suffix>
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle># If the target type is a built-in, attempt to instantiate and return | # If the target type is a built-in, attempt to instantiate and return | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
<fim_suffix>
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle># backwards compatibility with pydantic < 2 | # backwards compatibility with pydantic < 2 | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
<fim_suffix>
return data
<fim_middle># If none of the above, return the data as-is | # If none of the above, return the data as-is | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
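A minimal usage sketch tying together the check_type() and instantiate() methods shown in the row above, assuming the class is importable as tanuki.validator.Validator (matching the filename header of the next row); the Point model and the JSON payload are hypothetical.

import json
from typing import List

from pydantic import BaseModel

from tanuki.validator import Validator  # assumed import path


class Point(BaseModel):
    x: int
    y: int


validator = Validator()
raw = json.dumps([{"x": 1, "y": 2}, {"x": 3, "y": 4}])

# check_type() walks typing origins/args, so nested generics such as List[Point] are handled.
assert validator.validate_output(raw, List[Point])

# instantiate() rebuilds real Point instances from the parsed JSON payload.
points = validator.instantiate(json.loads(raw), List[Point])
assert all(isinstance(p, Point) for p in points)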
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
<fim_suffix>
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses such as OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle># check that all required arguments are in value and do type checking | # check that all required arguments are in value and do type checking | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
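The completion target of the row above guards the required-field check in the pydantic branch of check_type(). As a hedged sketch of that behaviour, the snippet below assumes the same tanuki.validator.Validator import path and a hypothetical User model: Optional fields are excluded from the required set, but any field that is present must still match its annotation.

from typing import Optional

from pydantic import BaseModel

from tanuki.validator import Validator  # assumed import path


class User(BaseModel):
    name: str
    nickname: Optional[str] = None


validator = Validator()

# "nickname" is Optional, so only "name" is treated as required...
assert validator.check_type({"name": "Ada"}, User)
# ...but a present field with the wrong type still fails the additional type check.
assert not validator.check_type({"name": "Ada", "nickname": 42}, User)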
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
# tanuki_py/src/tanuki/__init__.py
def mock_func(*args, **kwargs):
hashed_description = description.__hash__()
function_type, func = Register.get(func_name)
# If we are aligning a function that returns an embedding,
# we need to ensure both sides of the equality are future embeddings,
# as it is nonsensical to declare that an embedding should 'be' an object or a string, etc.
if function_type == FunctionType.EMBEDDABLE:
key = get_key(args, kwargs)
mocked_embedding = mock_behaviors.get(key, None)
# Find positive examples by matching the mocked embedding with identical embeddings in the values
# of the mock_behaviors dictionary
mock_positives_list = []
for k, v in mock_behaviors.items():
if v == mocked_embedding and k != key:
mock_positives_list.append(k)
equivalent_mocks = mock_positives_list
negative_mocks = list(mock_negatives.values())
function_modeler.save_embeddable_align_statements(hashed_description,
args,
kwargs,
equivalent_mocks,
negative_mocks)
return mocked_embedding
else:
# If we are aligning a function that returns an object
if not instance:
result = func(*args, **kwargs)
else:
result = func(instance, *args, **kwargs)
# Extract attributes from the result
attributes = extract_attributes(result)
for attr_name, attr_value in attributes.items():
# If the attribute is a list, get its length
if isinstance(attr_value, list):
attributes[attr_name] = len(attr_value)
key = get_key(args, kwargs)
mocked_behaviour = mock_behaviors.get(key, None)
function_modeler.save_symbolic_align_statements(hashed_description, args, kwargs,
mocked_behaviour)
return mocked_behaviour
# tanuki_py/src/tanuki/__init__.py
def create_mock_func(instance: Optional,
func_name: str,
description: FunctionDescription):
def mock_func(*args, **kwargs):
hashed_description = description.__hash__()
function_type, func = Register.get(func_name)
# If we are aligning a function that returns an embedding,
# we need to ensure both sides of the equality are future embeddings,
# as it is nonsensical to declare that an embedding should 'be' an object or a string, etc.
if function_type == FunctionType.EMBEDDABLE:
key = get_key(args, kwargs)
mocked_embedding = mock_behaviors.get(key, None)
# Find positive examples by matching the mocked embedding with identical embeddings in the values
# of the mock_behaviors dictionary
mock_positives_list = []
for k, v in mock_behaviors.items():
if v == mocked_embedding and k != key:
mock_positives_list.append(k)
equivalent_mocks = mock_positives_list
negative_mocks = list(mock_negatives.values())
function_modeler.save_embeddable_align_statements(hashed_description,
args,
kwargs,
equivalent_mocks,
negative_mocks)
return mocked_embedding
else:
# If we are aligning a function that returns an object
if not instance:
result = func(*args, **kwargs)
else:
result = func(instance, *args, **kwargs)
# Extract attributes from the result
attributes = extract_attributes(result)
for attr_name, attr_value in attributes.items():
# If the attribute is a list, get its length
if isinstance(attr_value, list):
attributes[attr_name] = len(attr_value)
key = get_key(args, kwargs)
mocked_behaviour = mock_behaviors.get(key, None)
function_modeler.save_symbolic_align_statements(hashed_description, args, kwargs,
mocked_behaviour)
return mocked_behaviour
return mock_func
"""
import os
from enum import Enum
from typing import Literal, Union, Optional, Dict
from appdirs import user_data_dir
from tanuki.constants import *
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.persistence.filter.filesystem_bloom import BloomFilterFileSystemDriver
from tanuki.trackers.abc_buffered_logger import ABCBufferedLogger
class FilesystemBufferedLogger(ABCBufferedLogger):
"""
A class that handles the reading and writing of patch invocations and align statements.
It includes the logic for a bloom filter, to ensure that we only store unique invocations.
"""
def __init__(self, name, level=15):
self.log_directory = self._get_log_directory()
super().__init__(name, level)
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. Typically this will be a file system provider.
:return: A persistence provider
"""
return BloomFilterFileSystemDriver(log_directory=self.log_directory)
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
def ensure_persistence_location_exists(self) -> None:
"""
Ensure that the location on the filesystem we will be writing to actually exists. If not, create it.
"""
log_directory = self.log_directory
# Create the folder if it doesn't exist
if not os.path.exists(log_directory):
os.makedirs(log_directory)
def does_object_exist(self, path: str) -> bool:
"""
Check to see if a path exists on the filesystem.
:param path:
:return:
"""
return os.path.exists(path)
def _get_log_directory(self) -> str:
"""
Find a location on the filesystem to write our logs to.
:return:
"""
filename = "functions"
# If explicitly defined
env_dir = os.getenv(ENVVAR)
if env_dir and os.path.isdir(env_dir):
return os.path.join(env_dir, filename)
<fim_suffix>
library_dir = os.path.join(user_data_dir(LIB_NAME), filename)
if os.path.isdir(library_dir) or not os.path.exists(library_dir):
return library_dir
# If installed in a project that contains a git repo - place it in the same folder as the git repo
current_dir = os.getcwd()
while current_dir != os.path.root:
if ".git" in os.listdir(current_dir):
return os.path.join(current_dir, filename)
current_dir = os.path.dirname(current_dir)
return os.path.join(os.getcwd(), filename)
def load_dataset(self, dataset_type, func_hash, return_type="both") -> Optional[int]:
"""
Get the size of the dataset for a function hash
"""
log_directory = self._get_log_directory()
dataset_type_map = {"alignments": ALIGN_FILE_EXTENSION,
"positive": POSITIVE_FILE_EXTENSION,
"negative": NEGATIVE_FILE_EXTENSION,
"patches": PATCH_FILE_EXTENSION}
log_file_path = os.path.join(log_directory, func_hash + dataset_type_map[dataset_type])
if not os.path.exists(log_file_path):
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
try:
with open(log_file_path, "rb") as f:
dataset = f.read()
dataset_string = repr(dataset)
dataset_length = dataset_string.count("\\n") - dataset_string.count("\\\\n")
if return_type == "both":
return dataset_length, dataset
elif return_type == "dataset":
return dataset
elif return_type == "length":
return dataset_length
except Exception as e:
if return_type == "both":
return 0, None
elif return_type == "dataset":
return None
elif return_type == "length":
return 0
def load_existing_datasets(self) -> Dict[str, Dict[str, str]]:
log_directory = self.log_directory
dataset_lengths = {
SYMBOLIC_ALIGNMENTS: {},
POSITIVE_EMBEDDABLE_ALIGNMENTS: {},
NEGATIVE_EMBEDDABLE_ALIGNMENTS: {},
PATCHES: {},
}
try:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
# get all the files in the log directory
files = os.listdir(log_directory)
# discard all .json files
files = [x for x in files if ".json" not in x]
except Exception as e:
return dataset_lengths
for file in files:
if ALIGN_FILE_EXTENSION not in file \
and PATCH_FILE_EXTENSION not in file \
and POSITIVE_FILE_EXTENSION not in file \
and NEGATIVE_FILE_EXTENSION not in file:
continue
elif ALIGN_FILE_EXTENSION in file:
dataset_type = SYMBOLIC_ALIGNMENTS
elif POSITIVE_FILE_EXTENSION in file:
dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS
elif NEGATIVE_FILE_EXTENSION in file:
dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS
else:
dataset_type = PATCHES
func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "")
dataset_lengths[dataset_type][func_hash] = -1
return dataset_lengths
def write(self, path: str, data: str, mode: Literal["w", "a", "a+b"] = "w") -> None:
"""
Write data to a file
"""
with open(path, mode) as f:
f.write(data)
def read(self, path: str) -> str:
"""
Read data from a file
"""
with open(path, "r") as f:
return f.read()
def get_hash_from_path(self, path) -> str:
"""
Given a path with a hash, return only the hash
:param path: The path to the file
:return: The hash
"""
return path.replace(PATCH_FILE_EXTENSION, ""). \
replace(self.log_directory, ""). \
lstrip("/"). \
lstrip("\\")
<fim_middle># If installed as a library | # If installed as a library | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
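The row above targets the comment in FilesystemBufferedLogger._get_log_directory() that precedes the user-data-dir fallback. The helper below is a standalone sketch of that lookup order (environment variable, per-user data directory, enclosing git repository, current working directory); the ENVVAR and LIB_NAME values are stand-ins for the constants in tanuki.constants, and the loop guard stops once os.path.dirname() reaches a fixed point instead of comparing against an os.path.root attribute.

import os

from appdirs import user_data_dir

ENVVAR = "TANUKI_LOG_DIR"  # assumed value; the real name lives in tanuki/constants.py
LIB_NAME = "tanuki"        # assumed value


def resolve_log_directory(filename: str = "functions") -> str:
    # 1. Explicit override via environment variable.
    env_dir = os.getenv(ENVVAR)
    if env_dir and os.path.isdir(env_dir):
        return os.path.join(env_dir, filename)
    # 2. Per-user data directory when installed as a library.
    library_dir = os.path.join(user_data_dir(LIB_NAME), filename)
    if os.path.isdir(library_dir) or not os.path.exists(library_dir):
        return library_dir
    # 3. Walk upwards looking for an enclosing git repository.
    current_dir = os.getcwd()
    while os.path.dirname(current_dir) != current_dir:
        if ".git" in os.listdir(current_dir):
            return os.path.join(current_dir, filename)
        current_dir = os.path.dirname(current_dir)
    # 4. Fall back to the current working directory.
    return os.path.join(os.getcwd(), filename)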
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
<fim_suffix>
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the isclass check needs to come first so that origin is guaranteed to have an __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
return int(float(data)) | try:
return int(float(data)) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
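
# Illustrative sketch of the fallback completed in the row above: int("3.7") raises
# ValueError, so float-formatted strings are routed through float() before int().
# The helper name below (coerce_to_int) is hypothetical, not part of the repository code.
def coerce_to_int(data):
    """Best-effort int coercion mirroring the int-branch fallback shown above."""
    try:
        return int(data)            # handles "3", 3, 3.0
    except (ValueError, TypeError):
        return int(float(data))     # handles "3.7" -> 3; still raises for non-numeric input

assert coerce_to_int("3") == 3
assert coerce_to_int("3.7") == 3
assert coerce_to_int(2.9) == 2
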
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/function_modeler.py
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
"""
import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
def log_symbolic_patch(self, func_hash, example):
"""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
"""
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
try:
self.ensure_persistence_location_exists()
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray()
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
        if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096):  # Flush once the per-file limit (capped at 4 KB) is reached
written_datapoints = {}
<fim_suffix>
except Exception as e:
pass
return written_datapoints
return {}
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the message and log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle>try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter() | try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter() | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
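
# Illustrative sketch of the buffering pattern completed in the row above: append to an
# in-memory buffer and, once a per-file threshold is reached, write it out, clear it, and
# double the threshold so later flushes happen less often. The class and names below are
# hypothetical and only mirror the pattern; they are not the library's API.
class GrowingBuffer:
    def __init__(self, write_fn, initial_limit: int = 1, hard_cap: int = 4096):
        self.write_fn = write_fn          # callable that persists raw bytes
        self.buffer = bytearray()
        self.flush_limit = initial_limit
        self.hard_cap = hard_cap          # never wait for more than this many bytes

    def append(self, data: bytes) -> int:
        """Buffer data; return the number of bytes flushed (0 if still below the limit)."""
        self.buffer.extend(data)
        if len(self.buffer) >= min(self.flush_limit, self.hard_cap):
            flushed = len(self.buffer)
            self.write_fn(bytes(self.buffer))
            self.buffer.clear()
            self.flush_limit *= 2         # back off: the next flush needs twice as much data
            return flushed
        return 0

chunks = []
buf = GrowingBuffer(chunks.append, initial_limit=8)
buf.append(b"abcd")                       # buffered: 4 bytes, below the 8-byte limit
buf.append(b"efgh")                       # reaches 8 bytes -> flushed, limit doubles to 16
assert chunks == [b"abcdefgh"]
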
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
<fim_suffix>
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the isclass check needs to come first so that origin is guaranteed to have an __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True | try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
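
# Illustrative sketch of how the completion above separates required fields from optional
# ones: a field counts as optional only when its annotation is a Union that includes
# NoneType, i.e. Optional[...]. Standalone and hypothetical; it only mirrors that check.
from typing import Optional, Union, get_args, get_origin

def required_field_names(model_cls) -> list:
    """Return annotated attribute names whose annotation is not Optional[...]."""
    return [
        name
        for name, annotation in model_cls.__annotations__.items()
        if not (get_origin(annotation) is Union and type(None) in get_args(annotation))
    ]

class ExampleModel:                        # stand-in for a pydantic model's annotations
    name: str
    nickname: Optional[str]

assert required_field_names(ExampleModel) == ["name"]
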
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
<fim_suffix>
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
return self.instantiate(data, arg) | try:
return self.instantiate(data, arg) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
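The target above fills the Union branch of Validator.instantiate: each member type of the Union is tried in order and the first successful conversion is returned. A minimal standalone sketch of that pattern, using illustrative names that are not part of the library:

from typing import Union, get_args, get_origin

def coerce_union(data, annotation):
    # Try each member type of the Union until one converts the data cleanly.
    assert get_origin(annotation) is Union
    for member in get_args(annotation):
        try:
            return member(data)
        except (TypeError, ValueError):
            continue
    raise TypeError(f"Could not coerce {data!r} to {annotation}")

# Example: coerce_union("3", Union[int, str]) returns 3, because int is tried first.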
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
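    # Illustrative examples of check_type (assumed, not taken from the library's test suite):
    #   Validator().check_type({"a": [1, 2]}, typing.Dict[str, typing.List[int]])  -> True
    #   Validator().check_type(["1", 2], typing.List[int])                         -> False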
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
<fim_suffix>
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
return int(float(data)) | try:
return int(float(data)) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
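This row's hole sits in the string-to-number fallback of Validator.instantiate: when the direct constructor call fails, the value is routed through float(...) before a second attempt. A small illustrative sketch of that kind of fallback, using a hypothetical helper name:

def lenient_int(value):
    # int("3.0") raises ValueError, so route the string through float first.
    try:
        return int(value)
    except (TypeError, ValueError):
        return int(float(value))

# Example: lenient_int("3.0") returns 3, and lenient_int("7") returns 7.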
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
                if target_type is float:
                    try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
            # Handle dict subclasses such as OrderedDict
            # the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
<fim_suffix>
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
instantiated_item = self.instantiate(item, item_type) | try:
instantiated_item = self.instantiate(item, item_type) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
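The target for this row is the recursive per-element call inside the list branch of instantiate: every element of the incoming list is itself passed back through instantiate against the subscripted item type before the container is rebuilt. A compact sketch of the same idea for a plain List[...] annotation (a simplification; the real method also re-validates each element with check_type):

from typing import List, get_args

def instantiate_list(data, annotation):
    # Pull the element type out of List[X]; fall back to leaving items untouched.
    args = get_args(annotation)
    item_type = args[0] if args else None
    return [item_type(item) if item_type else item for item in data]

# Example: instantiate_list(["1", "2"], List[int]) returns [1, 2].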
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
<fim_suffix>
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the isclass check must come first to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
obj = origin(**value)
return True | try:
obj = origin(**value)
return True | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
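A minimal, hedged usage sketch of the Validator.check_type logic reproduced in the rows of this file; the import path tanuki.validator and the standalone-script framing are assumptions, not part of the dataset row.

# Sketch only: verified against the check_type implementation shown above, not against a released API.
from typing import Dict, List, Optional
from tanuki.validator import Validator  # assumed import path

validator = Validator()
assert validator.check_type(3, int)                    # base types fall back to isinstance
assert validator.check_type([1, 2, 3], List[int])      # containers are checked element-wise
assert not validator.check_type(["a"], List[int])
assert validator.check_type({"a": 1}, Dict[str, int])  # dicts are checked per key and value
assert validator.check_type(5, Optional[int])          # Union succeeds if any member matches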
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Collect the types that expose dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
<fim_suffix>
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the isclass check must come first to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
return target_type.model_validate(data) | try:
return target_type.model_validate(data) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
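A minimal, hedged sketch of Validator.instantiate with a Pydantic model, mirroring the completed target above (model_validate with a parse_obj fallback); Person is a hypothetical model and the import path is an assumption.

from typing import List
from pydantic import BaseModel
from tanuki.validator import Validator  # assumed import path

class Person(BaseModel):  # hypothetical model for illustration
    name: str
    scores: List[int]

validator = Validator()
# Sub-attributes are instantiated recursively first, then the model is built via
# model_validate (pydantic v2) or parse_obj (pydantic v1).
person = validator.instantiate({"name": "Ada", "scores": [1, 2, 3]}, Person)
assert isinstance(person, Person) and person.scores == [1, 2, 3]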
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Collect the types that expose dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
<fim_suffix>
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the isclass check must come first to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>try:
return target_type(**data) | try:
return target_type(**data) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
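A minimal, hedged sketch of Validator.instantiate on a dataclass, exercising the field-filtering branch shown in the row above; Job is a hypothetical type and the import path is an assumption.

from dataclasses import dataclass
from typing import Dict
from tanuki.validator import Validator  # assumed import path

@dataclass
class Job:  # hypothetical dataclass for illustration
    title: str
    tags: Dict[str, int]

validator = Validator()
# Keys that are not dataclass fields ("extra") are dropped before construction.
job = validator.instantiate({"title": "editor", "tags": {"a": 1}, "extra": "ignored"}, Job)
assert job == Job(title="editor", tags={"a": 1})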
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/function_modeler.py
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def get_patch_location_for_function(self, func_hash, extension: Union[
ALIGN_FILE_EXTENSION_TYPE, PATCH_FILE_EXTENSION_TYPE] = "") -> str:
"""
Get the local location of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
return os.path.join(self.log_directory, func_hash + extension)
"""
import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
Get an instance of the bloom filter persistence provider. This exposes some persistent file storage,
that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
def log_symbolic_patch(self, func_hash, example):
"""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
"""
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
<fim_suffix>
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray()
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096): # Flush after reaching 4KB
written_datapoints = {}
try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter()
except Exception as e:
pass
return written_datapoints
return {}
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the message and log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle>try:
self.ensure_persistence_location_exists() | try:
self.ensure_persistence_location_exists() | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
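A minimal standalone sketch of the buffered-write pattern used in ABCBufferedLogger.log_symbolic_patch above: per-path byte buffers, a per-path flush limit that doubles after each flush (capped at 4 KB), and a global write counter that forces a full flush. Class and method names are illustrative and an in-memory dict stands in for file storage; this is not the tanuki API.

from collections import defaultdict

class BufferedWriter:
    def __init__(self, write_limit=1000):
        self.buffers = {}          # path -> bytearray of pending lines
        self.flush_limit = {}      # path -> current flush threshold in bytes
        self.write_count = 0
        self.write_limit = write_limit
        self.storage = defaultdict(bytes)   # in-memory stand-in for append-mode files

    def _append(self, path, data):
        # Stand-in for the logger's self.write(path, data, mode="a+b").
        self.storage[path] += bytes(data)

    def log(self, path, line):
        buf = self.buffers.setdefault(path, bytearray())
        self.flush_limit.setdefault(path, 1)
        buf.extend(line)
        self.write_count += 1
        if self.write_count >= self.write_limit:
            self.flush()                    # periodic full flush
            self.write_count = 0
            return
        if len(buf) >= min(self.flush_limit[path], 4096):
            self._append(path, buf)
            buf.clear()
            # Double the limit: small files flush eagerly at first, then less often.
            self.flush_limit[path] *= 2

    def flush(self):
        for path, buf in self.buffers.items():
            if buf:
                self._append(path, buf)
                buf.clear()

writer = BufferedWriter()
for i in range(5):
    writer.log("func_abc.patches", ('{"example": %d}\n' % i).encode("utf-8"))
writer.flush()
print(writer.storage["func_abc.patches"].decode("utf-8"))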
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
<fim_suffix>
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except:
return False | except:
return False | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
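The check_type logic in the row above is driven by typing introspection: get_origin() and get_args() split a parameterised annotation into its container and element types, so a single recursive checker can cover List[int], Dict[str, float], Optional[...] and so on. A much-reduced sketch of that dispatch, for illustration only (it omits most of the cases Validator handles):

from typing import Any, Dict, List, Optional, Union, get_args, get_origin

def simple_check(value, annotation):
    if annotation is Any:
        return True
    origin = get_origin(annotation) or annotation
    args = get_args(annotation)
    if origin is Union:                       # covers Optional[X] == Union[X, None]
        return any(simple_check(value, a) for a in args)
    if origin is list:
        item_type = args[0] if args else Any
        return isinstance(value, list) and all(simple_check(v, item_type) for v in value)
    if origin is dict:
        key_type, value_type = args if len(args) == 2 else (Any, Any)
        return isinstance(value, dict) and all(
            simple_check(k, key_type) and simple_check(v, value_type) for k, v in value.items()
        )
    if annotation is type(None):
        return value is None
    return isinstance(value, origin)

print(simple_check([1, 2, 3], List[int]))            # True
print(simple_check({"a": 1.0}, Dict[str, float]))    # True
print(simple_check(None, Optional[int]))             # True
print(simple_check(["x"], List[int]))                # False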
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
<fim_suffix>
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except:
continue | except:
continue | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
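The instantiate method appearing in the two rows above follows a recursive "JSON structure in, typed object out" pattern; the dataclass branch is the easiest to see in isolation. A simplified sketch of just that branch, with illustrative names (the real method also covers pydantic models, mappings, sequences, unions and datetimes):

from dataclasses import dataclass, fields, is_dataclass
from typing import Any, List, get_args, get_origin, get_type_hints

def from_json(data, target):
    origin = get_origin(target) or target
    if is_dataclass(origin) and isinstance(data, dict):
        # Read declared field types, instantiate each field recursively,
        # then call the dataclass constructor with the converted values.
        hints = get_type_hints(origin)
        kwargs = {f.name: from_json(data[f.name], hints[f.name])
                  for f in fields(origin) if f.name in data}
        return origin(**kwargs)
    if origin is list and isinstance(data, list):
        item_type = get_args(target)[0] if get_args(target) else Any
        return [from_json(item, item_type) for item in data]
    return data  # base types and Any pass through unchanged

@dataclass
class Point:
    x: int
    y: int

@dataclass
class Polygon:
    name: str
    points: List[Point]

poly = from_json({"name": "tri", "points": [{"x": 0, "y": 0}, {"x": 1, "y": 2}]}, Polygon)
print(poly)   # Polygon(name='tri', points=[Point(x=0, y=0), Point(x=1, y=2)])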
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
<fim_suffix>
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
        elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except (ValueError, TypeError):
pass | except (ValueError, TypeError):
pass | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
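The check_type logic in the row above dispatches on typing.get_origin and typing.get_args to split a parameterised annotation into its container type and its item types. A minimal standalone sketch of that behaviour (standard library only, illustrative and not part of any dataset row):

from typing import Dict, List, Optional, Union, get_args, get_origin

# get_origin returns the bare container class, get_args the type parameters.
assert get_origin(List[int]) is list and get_args(List[int]) == (int,)
assert get_origin(Dict[str, float]) is dict and get_args(Dict[str, float]) == (str, float)

# Optional[X] is Union[X, None], so the Union branch of check_type covers it.
assert get_origin(Optional[str]) is Union and get_args(Optional[str]) == (str, type(None))

# Plain classes have no origin and no args, which is why check_type falls back to
# `get_origin(type_definition) or type_definition`.
assert get_origin(int) is None and get_args(int) == ()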
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _construct_config_from_finetune(self, finetune_hash: str, finetune: FinetuneJob):
"""
Construct a valid function config from a finetune job
Args:
finetune_hash: The hash of the function
finetune: The finetune job
Returns:
config: The function config
"""
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.model_name.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model.model_name[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": [], # default teacher models, will be overwritten if needed
"nr_of_training_runs": nr_of_training_runs}
config = FunctionConfig().load_from_dict(config)
return config
# tanuki_py/src/tanuki/utils.py
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones withouyt default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
<fim_suffix>
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
        elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except Exception as e:
print(e)
return False | except Exception as e:
print(e)
return False | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
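To make the behaviour of check_type and validate_output above concrete, here is a hedged usage sketch. The import path tanuki.validator.Validator is an assumption inferred from the <filename> header and may differ in the installed package; the expected results follow directly from the branches shown in the row above.

from typing import Dict, List, Optional

from tanuki.validator import Validator  # assumed import path, inferred from the filename header

validator = Validator()

# check_type validates an already-deserialised value against a type annotation.
assert validator.check_type([1, 2, 3], List[int])
assert not validator.check_type([1, "two"], List[int])
assert validator.check_type({"a": 1.0, "b": 2.5}, Dict[str, float])
assert validator.check_type("hello", Optional[str])

# validate_output JSON-decodes the string first, then defers to check_type.
assert validator.validate_output('{"a": 1, "b": 2}', Dict[str, int])
assert not validator.validate_output("not valid json", Dict[str, int])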
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
<fim_suffix>
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
        elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except (ValueError, TypeError):
pass | except (ValueError, TypeError):
pass | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
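The instantiate method in these rows converts JSON-compatible data into typed Python objects. A hedged sketch of the dataclass and generic-container paths, again assuming the tanuki.validator import path inferred from the filename header:

from dataclasses import dataclass
from typing import Dict, List

from tanuki.validator import Validator  # assumed import path, inferred from the filename header


@dataclass
class Point:
    x: int
    y: int


validator = Validator()

# Dataclass path: keys are filtered to the declared fields and each value is
# instantiated against its type hint, so "3" and 4.0 are coerced to int.
point = validator.instantiate({"x": "3", "y": 4.0, "ignored": True}, Point)
assert point == Point(x=3, y=4)

# Generic containers: keys, values and items are instantiated recursively.
assert validator.instantiate({"a": "1", "b": "2"}, Dict[str, int]) == {"a": 1, "b": 2}
assert validator.instantiate(["1", "2", "3"], List[int]) == [1, 2, 3]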
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
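# e.g. Dict[str, int] yields origin=dict; for a non-generic class, get_origin returns None and origin falls back to the class itself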
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
<fim_suffix>
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the isclass check needs to come first to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data) | except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
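The completed block above closes the pydantic branch of Validator.instantiate with a fallback to parse_obj for pydantic v1. As a rough usage sketch of the Validator class reproduced in this row (assuming it is importable exactly as shown; the example types and values below are illustrative and not taken from the dataset):

from dataclasses import dataclass
from typing import Dict, List

@dataclass
class Point:
    x: int
    y: int

validator = Validator()  # the class shown in the row above

# validate_output parses a JSON string and type-checks the result in one step
assert validator.validate_output('[1, 2, 3]', List[int])
assert not validator.validate_output('{"x": 1}', List[int])

# instantiate converts JSON-compatible data into the requested type,
# recursing into dataclass fields and dict/list element types
point = validator.instantiate({"x": 1, "y": 2}, Point)
assert isinstance(point, Point)
assert validator.instantiate({"a": "1", "b": "2"}, Dict[str, int]) == {"a": 1, "b": 2}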
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
# tanuki_py/src/tanuki/trackers/abc_buffered_logger.py
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
# tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py
def load_existing_datasets(self) -> Dict[str, Dict[str, str]]:
log_directory = self.log_directory
dataset_lengths = {
SYMBOLIC_ALIGNMENTS: {},
POSITIVE_EMBEDDABLE_ALIGNMENTS: {},
NEGATIVE_EMBEDDABLE_ALIGNMENTS: {},
PATCHES: {},
}
try:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
# get all the files in the log directory
files = os.listdir(log_directory)
# discard all .json files
files = [x for x in files if ".json" not in x]
except Exception as e:
return dataset_lengths
for file in files:
if ALIGN_FILE_EXTENSION not in file \
and PATCH_FILE_EXTENSION not in file \
and POSITIVE_FILE_EXTENSION not in file \
and NEGATIVE_FILE_EXTENSION not in file:
continue
elif ALIGN_FILE_EXTENSION in file:
dataset_type = SYMBOLIC_ALIGNMENTS
elif POSITIVE_FILE_EXTENSION in file:
dataset_type = POSITIVE_EMBEDDABLE_ALIGNMENTS
elif NEGATIVE_FILE_EXTENSION in file:
dataset_type = NEGATIVE_EMBEDDABLE_ALIGNMENTS
else:
dataset_type = PATCHES
func_hash = file.replace(ALIGN_FILE_EXTENSION, "").replace(PATCH_FILE_EXTENSION, "")
dataset_lengths[dataset_type][func_hash] = -1
return dataset_lengths
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Keep only the types that expose dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
<fim_suffix>
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle other dict subclasses such as OrderedDict
# the isclass check needs to come first to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = (Any,)
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except json.JSONDecodeError:
return False | except json.JSONDecodeError:
return False | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
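Each row in this dump has the same shape: an inputs cell carrying retrieved repository snippets plus <fim_prefix>/<fim_suffix>/<fim_middle> markers, a targets cell with the ground-truth block (here the json.JSONDecodeError handler), and block_type/scenario labels. A minimal sketch of how such a row could be split and scored, assuming the prompt text ends at the <fim_middle> marker (the helper names are hypothetical):

def split_fim_prompt(inputs: str):
    # Split one inputs cell into (prefix, suffix); anything after <fim_middle> is ignored.
    _, _, rest = inputs.partition("<fim_prefix>")
    prefix, _, rest = rest.partition("<fim_suffix>")
    suffix, _, _ = rest.partition("<fim_middle>")
    return prefix, suffix

def exact_match(completion: str, target: str) -> bool:
    # Whitespace-normalised comparison of a generated block against the targets cell.
    return completion.strip() == target.strip()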
<filename>tanuki_py/src/tanuki/language_models/openai_api.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/language_models/anyscale_api.py
def generate(self, model, system_message, prompt, **kwargs):
"""
The main generation function: given the model, system message, prompt and any generation kwargs, generate a response
Args
model (Anyscaleconfig): The model to use for generation.
system_message (str): The system message to use for generation.
prompt (str): The prompt to use for generation.
kwargs (dict): Additional generation parameters.
"""
self.check_api_key()
temperature = kwargs.get("temperature", 0.1)
top_p = kwargs.get("top_p", 1)
frequency_penalty = kwargs.get("frequency_penalty", 0)
presence_penalty = kwargs.get("presence_penalty", 0)
max_new_tokens = kwargs.get("max_new_tokens")
# check if there are any generation parameters that are not supported
unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
if len(unsupported_params) > 0:
# log warning
logging.warning(f"Unused generation parameters sent as input: {unsupported_params}. "\
f"For Anyscale, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
params = {
"model": model.model_name,
"temperature": temperature,
"max_tokens": max_new_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
if model.parsing_helper_tokens["start_token"]:
prompt += model.parsing_helper_tokens["start_token"]
messages = [
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": prompt
}
]
params["messages"] = messages
counter = 0
choice = None
# initialise response so the exception logic doesn't error out when checking for an error in the response
response = {}
while counter <= 5:
try:
anyscale_headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
response = requests.post(
f"{ANYSCALE_URL}/chat/completions",
headers=anyscale_headers,
json=params, timeout=50
)
response = response.json()
choice = response["choices"][0]["message"]["content"].strip("'")
break
except Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied Anyscale API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"Anyscale API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue
if not choice:
raise Exception("Anyscale API failed to generate a response")
if model.parsing_helper_tokens["end_token"]:
# remove the end token from the choice
choice = choice.split(model.parsing_helper_tokens["end_token"])[0]
# check if starting token is in choice
if model.parsing_helper_tokens["start_token"] in choice:
# remove the starting token from the choice
choice = choice.split(model.parsing_helper_tokens["start_token"])[-1]
return choice.strip()
# tanuki_py/src/tanuki/language_models/togetherai_api.py
def generate(self, model, system_message, prompt, **kwargs):
"""
The main generation function: given the model, system message, prompt and any generation kwargs, generate a response
Args
model (OpenAIConfig): The model to use for generation.
system_message (str): The system message to use for generation.
prompt (str): The prompt to use for generation.
kwargs (dict): Additional generation parameters.
"""
self.check_api_key()
if model.model_name not in self.model_configs:
self.model_configs[model.model_name] = together.Models.info(model.model_name)['config']
temperature = kwargs.get("temperature", 0.1)
top_p = kwargs.get("top_p", 1)
frequency_penalty = kwargs.get("frequency_penalty", 0)
presence_penalty = kwargs.get("presence_penalty", 0)
max_new_tokens = kwargs.get("max_new_tokens")
# check if there are any generation parameters that are not supported
unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
if len(unsupported_params) > 0:
# log warning
logging.warning(f"Unused generation parameters sent as input: {unsupported_params}. "\
f"For Together AI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
params = {
"model": model.model_name,
"temperature": temperature,
"max_tokens": max_new_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty
}
if "stop" in self.model_configs[model.model_name]:
params["stop"] = list(self.model_configs[model.model_name]["stop"])
if model.parsing_helper_tokens["end_token"]:
params["stop"] = model.parsing_helper_tokens["end_token"]
chat_prompt = model.chat_template
if chat_prompt is None:
try:
prompt_format = str(self.model_configs[model.model_name]['prompt_format'])
final_prompt = prompt_format.format(system_message=system_message, prompt=prompt)
except:
logging.warning("Chat prompt is not defined for this model. "\
"Please define it in the model config. Using default chat prompt")
chat_prompt = "[INST]{system_message}[/INST]\n{user_prompt}"
final_prompt = chat_prompt.format(system_message=system_message, user_prompt=prompt)
else:
final_prompt = chat_prompt.format(system_message=system_message, user_prompt=prompt)
if model.parsing_helper_tokens["start_token"]:
final_prompt += model.parsing_helper_tokens["start_token"]
params["prompt"] = final_prompt
counter = 0
choice = None
# initialise response so the exception logic doesn't error out when checking for an error in the response
response = {}
while counter <= 5:
try:
openai_headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
response = requests.post(
TOGETHER_AI_URL, headers=openai_headers, json=params, timeout=50
)
response = response.json()
choice = response["output"]["choices"][0]["text"].strip("'")
break
except Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied Together AI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"Together AI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue
if not choice:
raise Exception("TogetherAI API failed to generate a response")
if model.parsing_helper_tokens["end_token"]:
# remove the end token from the choice
choice = choice.split(model.parsing_helper_tokens["end_token"])[0]
# check if starting token is in choice
if model.parsing_helper_tokens["start_token"] in choice:
# remove the starting token from the choice
choice = choice.split(model.parsing_helper_tokens["start_token"])[-1]
return choice.strip()
# tanuki_py/src/tanuki/language_models/llama_bedrock_api.py
def generate(self, model: BaseModelConfig, system_message: str, prompt: str, **kwargs):
"""
Generate a response using the Bedrock API for the specified LLama model.
Args:
model: The model to use for generation.
system_message: The system message to use for generation.
prompt: The prompt to use for generation.
kwargs: Additional generation parameters.
Returns:
The generated response.
"""
# this needs to be done generally better, introduce the LLM_gen params class
# so you can config it at the start
temperature = kwargs.get("temperature", 0.1)
top_p = kwargs.get("top_p", 1)
max_tokens_to_sample = kwargs.get("max_new_tokens")
# check if there are any generation parameters that are not supported
unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
if len(unsupported_params) > 0:
# log warning
logging.warning(f"Unused generation parameters sent as input: {unsupported_params}. "\
f"For Llama Bedrock, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
chat_prompt = model.chat_template
if chat_prompt is None:
raise Exception("Chat prompt is not defined for this model. "\
"Please define it in the model config")
final_prompt = chat_prompt.format(system_message=system_message, user_prompt=prompt)
if model.parsing_helper_tokens["start_token"]:
final_prompt += model.parsing_helper_tokens["start_token"]
body = json.dumps({
"prompt": final_prompt,
"max_gen_len": max_tokens_to_sample,
"temperature": temperature,
"top_p": top_p,
})
response_body = self.send_api_request(model, body)
choice = response_body.get("generation")
if model.parsing_helper_tokens["end_token"]:
# remove the end token from the choice
choice = choice.split(model.parsing_helper_tokens["end_token"])[0]
# check if starting token is in choice
if model.parsing_helper_tokens["start_token"] in choice:
# remove the starting token from the choice
choice = choice.split(model.parsing_helper_tokens["start_token"])[-1]
return choice.strip()
"""
from typing import List
import logging
import time
# import abstract base class
from openai import OpenAI
from openai.types import CreateEmbeddingResponse
from openai.types.fine_tuning import FineTuningJob
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.embedding import Embedding
from tanuki.language_models.embedding_api_abc import Embedding_API
from tanuki.language_models.llm_api_abc import LLM_API
import os
from tanuki.constants import DEFAULT_DISTILLED_MODEL_NAME
from tanuki.language_models.llm_configs.openai_config import OpenAIConfig
from tanuki.models.finetune_job import FinetuneJob
import copy
OPENAI_URL = "https://api.openai.com/v1/chat/completions"
import requests
LLM_GENERATION_PARAMETERS = ["temperature", "top_p", "max_new_tokens", "frequency_penalty", "presence_penalty"]
class OpenAI_API(LLM_API, Embedding_API, LLM_Finetune_API):
def __init__(self) -> None:
# initialise the abstract base class
super().__init__()
self.api_key = os.environ.get("OPENAI_API_KEY")
self.client = None
def embed(self, texts: List[str], model: OpenAIConfig, **kwargs) -> List[Embedding]:
"""
Generate embeddings for the provided texts using the specified OpenAI model.
Lightweight wrapper over the OpenAI client.
:param texts: A list of texts to embed.
:param model: The model to use for embeddings.
:return: A list of embeddings.
"""
self.check_api_key()
try:
response: CreateEmbeddingResponse = self.client.embeddings.create(
input=texts,
model=model.model_name,
**kwargs
)
assert response.object == "list"
assert len(response.data) == len(texts)
embeddings = []
for embedding_response in response.data:
assert embedding_response.object == "embedding"
embeddings.append(Embedding(embedding_response.embedding))
return embeddings
except Exception as e:
print(f"An error occurred: {e}")
return None
def generate(self, model, system_message, prompt, **kwargs):
"""
The main generation function: given the model, system message, prompt and any generation kwargs, generate a response
Args
model (OpenAIConfig): The model to use for generation.
system_message (str): The system message to use for generation.
prompt (str): The prompt to use for generation.
kwargs (dict): Additional generation parameters.
"""
self.check_api_key()
temperature = kwargs.get("temperature", 0.1)
top_p = kwargs.get("top_p", 1)
frequency_penalty = kwargs.get("frequency_penalty", 0)
presence_penalty = kwargs.get("presence_penalty", 0)
max_new_tokens = kwargs.get("max_new_tokens")
# check if there are any generation parameters that are not supported
unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
if len(unsupported_params) > 0:
# log warning
logging.warning(f"Unused generation parameters sent as input: {unsupported_params}. "\
f"For OpenAI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
params = {
"model": model.model_name,
"temperature": temperature,
"max_tokens": max_new_tokens,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
if model.parsing_helper_tokens["start_token"]:
prompt += model.parsing_helper_tokens["start_token"]
messages = [
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": prompt
}
]
params["messages"] = messages
counter = 0
choice = None
# initialise response so the exception logic doesn't error out when checking for an error in the response
response = {}
while counter <= 5:
try:
openai_headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
response = requests.post(
OPENAI_URL, headers=openai_headers, json=params, timeout=50
)
response = response.json()
choice = response["choices"][0]["message"]["content"].strip("'")
break
<fim_suffix>
if not choice:
raise Exception("OpenAI API failed to generate a response")
if model.parsing_helper_tokens["end_token"]:
# remove the end token from the choice
choice = choice.split(model.parsing_helper_tokens["end_token"])[0]
# check if starting token is in choice
if model.parsing_helper_tokens["start_token"] in choice:
# remove the starting token from the choice
choice = choice.split(model.parsing_helper_tokens["start_token"])[-1]
return choice
def list_finetuned(self, model_config, limit=100, **kwargs) -> List[FinetuneJob]:
self.check_api_key()
response = self.client.fine_tuning.jobs.list(limit=limit)
jobs = []
for job in response.data:
finetune_job = self.create_finetune_job(job, model_config)
jobs.append(finetune_job)
return jobs
def get_finetuned(self, job_id, model_config: OpenAIConfig) -> FinetuneJob:
self.check_api_key()
response = self.client.fine_tuning.jobs.retrieve(job_id)
finetune_job = self.create_finetune_job(response, model_config=model_config)
return finetune_job
def finetune(self, file, suffix, model_config, **kwargs) -> FinetuneJob:
self.check_api_key()
# Use the stream as a file
response = self.client.files.create(file=file, purpose='fine-tune')
training_file_id = response.id
if not model_config.base_model_for_sft:
model_config.base_model_for_sft = DEFAULT_DISTILLED_MODEL_NAME
# submit the finetuning job
finetuning_response: FineTuningJob = self.client.fine_tuning.jobs.create(training_file=training_file_id,
model=model_config.base_model_for_sft,
suffix=suffix)
finetune_job = self.create_finetune_job(finetuning_response, model_config)
return finetune_job
def create_finetune_job(self, response: FineTuningJob, model_config: OpenAIConfig) -> FinetuneJob:
finetuned_model_config = copy.deepcopy(model_config)
finetuned_model_config.model_name = response.fine_tuned_model
finetune_job = FinetuneJob(response.id, response.status, finetuned_model_config)
return finetune_job
def check_api_key(self):
# check if api key is not none
if not self.api_key:
# try to get the api key from the environment, maybe it has been set later
self.api_key = os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key is not set")
if not self.client:
self.client = OpenAI(api_key=self.api_key)
<fim_middle>except Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"OpenAI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue | except Exception as e:
if ("error" in response and
"code" in response["error"] and
response["error"]["code"] == 'invalid_api_key'):
raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
if counter == 5:
raise Exception(f"OpenAI API failed to generate a response: {e}")
counter += 1
time.sleep(2 ** counter)
continue | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
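The completed catch block above is one arm of a retry loop: it checks the JSON body for an invalid_api_key error, gives up after five attempts, and otherwise sleeps with exponential backoff before retrying. The same pattern in isolation, as a sketch (the callable name is an assumption, not part of the tanuki API):

import time

def call_with_backoff(send_request, max_retries: int = 5):
    # send_request is any zero-argument callable that performs the HTTP call and raises on failure.
    for attempt in range(max_retries + 1):
        try:
            return send_request()
        except Exception as exc:
            if attempt == max_retries:
                raise Exception(f"request failed after {max_retries} retries: {exc}")
            time.sleep(2 ** (attempt + 1))  # 2s, 4s, 8s, ... between attempts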
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Keep only the types that expose dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
<fim_suffix>
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") | except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.") | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
<fim_suffix>
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") | except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.") | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def log_symbolic_patch(self, func_hash, example):
"""
Save the example to the patch dataset for the function hash
Output must be a dictionary with the following structure:
{
"func_hash": int
}
Where func_hash is the hash of the function and int is the number of datapoints written to the dataset for this function
Args:
func_hash (str): the function hash
example (FunctionExample): the example to be saved
Returns:
dict: dictionary with the structure above
"""
# tanuki_py/src/tanuki/__init__.py
def get_instance_from_args(args):
# Check if there are any arguments
if args:
first_arg = args[0]
# Check if the first argument is named "self" or "cls" (or any other specific name)
if isinstance(first_arg, ast.Name) and first_arg.id in ("self", "cls"):
instance = first_arg
args = args[1:] # Remove the first argument
else:
instance = None
else:
instance = None
return instance, args
# tanuki_py/src/tanuki/trackers/dataset_worker.py
def load_function_config(self, func_hash):
"""
Get the config file for the function.
Function config must be a dictionary and have the following structure:
distilled_model (str): distilled_model_name ("" if no distilled model),
current_model_stats (dict): dict for current model stats
example:
{
"trained_on_datapoints" (int): 12 (number of datapoints trained on, 0 if not trained yet),
"running_faults" (list): [0, 0, 1] (list of 0s and 1s, where 0 is no fault and 1 is fault)
}
last_training_run (dict): dict for the last training run
example:
{
"job_id" (str): job_id for last training run,
"trained_on_datapoints" (int): dataset_size that was trained on,
"last_checked" (datetime in "%Y-%m-%d %H:%M:%S"): When the last check was made for status of training run)
}
Example when no training has been done yet:
{
"trained_on_datapoints": 0
}
current_training_run (dict): Same structure as last_training_run, only is non-empty if currently a model is training
Example when no training has been done yet:
{}
teacher_models (list of string): list of teacher models
example:
["gpt-4", "gpt-4-32k"]
nr_of_training_runs (int): number of training runs that have been done in total
}
The config file must be returned as a dictionary
Args:
func_hash (str): the function hash
Returns:
dict: the function config
"""
pass
"""
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
"""
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
# required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
# check that all arguments in value are correct type
# this is additional check, because the above check only checks required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
<fim_suffix>
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle dict subclasses like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g. Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
safe = self.check_type(instantiated_item, item_type)
if not safe:
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle>for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue | for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
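A sketch of the Union branch that this row's target fills in: instantiate() walks the Union members in order and returns the first successful conversion, raising TypeError only if every member fails (import path assumed as before):

from typing import Optional, Union
from tanuki.validator import Validator  # assumed import path

validator = Validator()
# Optional[int] is Union[int, None]; None data short-circuits before the loop.
assert validator.instantiate(None, Optional[int]) is None
# "3" is accepted by the first member that can coerce it (int here).
assert validator.instantiate("3", Union[int, str]) == 3
# If no member works, the branch raises TypeError.
try:
    validator.instantiate({"not": "a number"}, Union[int, float])
except TypeError as err:
    print(err)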
<filename>tanuki_py/src/tanuki/bloom_filter.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# tanuki_py/src/tanuki/function_modeler.py
def _configure_student_model(self,
student_model: str,
func_hash: str,
task_type: str):
"""
Add a custom student model to the function config
First this is added to the student_model_override dict, which is used to override the default student model
Args:
student_model: The student model to use for the function hash
func_hash: The function hash to add the student model to
task_type: The type of the function
"""
if task_type == FunctionType.EMBEDDABLE:
logging.info("Embeddable function type does not support student models")
preconfigured_models = DEFAULT_STUDENT_MODELS
if student_model not in preconfigured_models:
raise Exception(f"Student model {student_model} is currently not supported.")
model_config = preconfigured_models[student_model]
self.student_model_override[func_hash] = model_config
# tanuki_py/src/tanuki/function_modeler.py
def postprocess_symbolic_datapoint(self, func_hash, function_description, example, repaired=True):
"""
Postprocess the datapoint
First check if the datapoint should be added to the training data
Add the datapoint if it should be added
Then check if the function should be finetuned and execute finetuning if it should
"""
try:
if func_hash not in self.store_data_blacklist:
added = self.save_symbolic_datapoint(func_hash, example)
if added:
self._update_datapoint_config(repaired, func_hash)
except Exception as e:
print(e)
print("Could not add datapoint to training data")
if func_hash not in self.execute_finetune_blacklist:
self.check_for_finetuning(function_description, func_hash)
# tanuki_py/src/tanuki/language_models/language_model_manager.py
def __call__(self,
args,
function_description: FunctionDescription,
kwargs,
validator: Validator,
generation_parameters: dict) -> Any:
# add the generation length if not there
if "max_new_tokens" not in generation_parameters:
generation_parameters["max_new_tokens"] = self.default_generation_length
output = self.generate(args, kwargs, function_description, generation_parameters)
# start parsing the object, very hacky way for the time being
choice_parsed = self._parse_choice(output)
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
choice, choice_parsed, successful_repair = self.repair_output(args,
kwargs,
function_description,
output.generated_response,
validator,
generation_parameters)
if not successful_repair:
raise TypeError(
f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
output.generated_response = choice
output.distilled_model = False
datapoint = FunctionExample(args, kwargs, output.generated_response)
if output.suitable_for_finetuning and not output.distilled_model:
self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
datapoint, repaired=not valid)
instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
return instantiated
"""
import hashlib
import logging
import math
import numpy as np
from bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilter:
def __init__(self,
persistence: IBloomFilterPersistence,
size=None,
hash_count=None,
expected_number_of_elements=None,
false_positive_probability=None):
if not persistence:
raise ValueError("Persistence cannot be None, it must be an instance of IBloomFilterPersistence")
if not size and not hash_count and not expected_number_of_elements and not false_positive_probability:
raise ValueError("Must specify either (size, hash_count) or (expected_number_of_elements, false_positive_probability")
if expected_number_of_elements and false_positive_probability:
size, hash_count = BloomFilter.optimal_bloom_filter_params(expected_number_of_elements, false_positive_probability)
if not size and not hash_count:
raise ValueError("Size and hash_count not set. This should never happen.")
self.size = size
self.hash_count = hash_count
self.bit_array, self.indices = self.init_bit_array(size)
self.persistence = persistence
def init_bit_array(self, size):
_bit_array = bitarray(size)
_bit_array.setall(0)
_indices = np.zeros(size, dtype=np.int32)
return _bit_array, _indices
def hash_functions(self, string):
# h1(x)
hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16)
# h2(x)
hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
return hash1, hash2
def lookup(self, string):
hash1, hash2 = self.hash_functions(string)
for seed in range(self.hash_count):
index = (hash1 + seed * hash2) % self.size
#print(f"Lookup: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}")
if self.bit_array[index] == 0:
return False
return True
def add(self, string):
hash1, hash2 = self.hash_functions(string)
<fim_suffix>
def save(self):
self.persistence.save(self.bit_array)
def load(self):
self.bit_array = self.persistence.load()
length_in_bytes = int(len(self.bit_array)/8)
expected_length = math.ceil(self.size / 8)
if length_in_bytes != expected_length:
logging.warning("Bit array length does not match expected size, and so might be corrupted. Reinitializing.")
self.bit_array, self.indices = self.init_bit_array(self.size)
self.save()
@staticmethod
def optimal_bloom_filter_params(n, p):
"""
Calculate the optimal bit array size (m) and number of hash functions (k)
for a Bloom filter.
n: expected number of items to be stored
p: acceptable false positive probability
Returns a tuple (m, k)
"""
m = - (n * math.log(p)) / (math.log(2) ** 2)
k = (m / n) * math.log(2)
return int(math.ceil(m)), int(math.ceil(k))<fim_middle>for seed in range(self.hash_count):
index = (hash1 + seed * hash2) % self.size
self.bit_array[index] = 1
#print(f"Add: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}") | for seed in range(self.hash_count):
index = (hash1 + seed * hash2) % self.size
self.bit_array[index] = 1
#print(f"Add: Seed={seed}, Digest={index}, BitValue={self.bit_array[index]}") | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |