Dataset columns:

  inputs      string, lengths 312 to 52k characters (FIM-formatted code context)
  targets     string, lengths 1 to 3.1k characters (expected completion)
  block_type  string, 11 classes (the rows below show IF, STATEMENT, FOR, TRY, CATCH, METHOD, BLOCK_COMMENT, LINE_COMMENT)
  scenario    string, 7 classes (the rows below all show prefix_suffix_full_complete_current_block_with_evidence)

Each record below lists these four fields in order: inputs, targets, block_type, scenario. Long cells are truncated by the viewer ("...").
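As a minimal sketch of how a dataset with this schema could be inspected locally. The repository id below is a placeholder, not the real identifier; only the four column names come from the summary above.

```python
# Minimal sketch, assuming the Hugging Face `datasets` library.
# "your-org/fim-block-completion" is a hypothetical dataset id --
# substitute the real repository path.
from datasets import load_dataset

ds = load_dataset("your-org/fim-block-completion", split="train")

# Each record carries the four fields summarized above.
row = ds[0]
print(row["block_type"])    # e.g. "IF"
print(row["scenario"])      # e.g. "prefix_suffix_full_complete_current_block_with_evidence"
print(row["inputs"][:120])  # FIM-formatted prompt (truncated preview)
print(row["targets"][:120]) # expected completion for the current block
```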
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
phrase_freqs.shape[0] == self.max_doc_id + 1: enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids] else: enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id], keys=doc_ids.view(np.uint64)) for term_id in term...
IF
prefix_suffix_full_complete_current_block_with_evidence
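The `inputs` strings arrive already tagged with fill-in-the-middle sentinels (`<filename>`, `<fim_prefix>`). A minimal sketch of how such a record could be assembled into a supervised training string follows; note that the `<fim_suffix>` and `<fim_middle>` sentinels are assumptions based on common FIM conventions, since the viewer truncates the full prompt before any suffix is visible.

```python
# Minimal sketch, assuming StarCoder-style FIM sentinels. Only
# <filename> and <fim_prefix> are visible in the truncated rows;
# <fim_suffix>/<fim_middle> are assumed conventions.
def to_training_text(record: dict) -> str:
    # `inputs` already contains the sentinel-tagged context, e.g.
    # "<filename>path.py<fim_prefix>...<fim_suffix>...".
    # The model learns to emit the target after <fim_middle>.
    return record["inputs"] + "<fim_middle>" + record["targets"]

# Hypothetical toy record in the same shape as the rows below.
example = {
    "inputs": '<filename>demo.py<fim_prefix>def f(x):\n    <fim_suffix>\n    return y',
    "targets": "y = x + 1",
}
print(to_training_text(example))
```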
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
np.array([], dtype=bool)
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
= np.zeros(len(frame))
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
= mask & curr_mask
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
matches
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
= []
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
self.posns.docfreq(self.term_dict.get_term_id(token))
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array. See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm """ import numpy as np import sortednp as snp import logging import numbers from typing import Optional, Tup...
(lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits, rhs_shifted, indices=True, algorithm=_algorithm)
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array. See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm """ import numpy as np import sortednp as snp import logging import numbers from typing import Optional, Tup...
= (encoded & self.payload_msb_mask) >> self.payload_msb_bits
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
[np.array([], dtype=np.uint32)]
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
decs
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" ...
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_...
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join...
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <=...
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=sim...
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array. See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm """ import numpy as np import sortednp as snp import logging import numbers from typing import Optional, Tup...
bit in range(self.payload_lsb_bits): mask = 1 << bit lsbs = encoded & mask set_lsbs = (lsbs != 0) this_keys = keys[set_lsbs] payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits) doc_with_posn = np.dstack([this_keys, payload])[0] to...
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
curr_mask in masks: mask = mask & curr_mask
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
rhs in encoded_posns[1:]: # Only count the count of the last bigram (ignoring the ones where priors did not match) phrase_freqs[mask] = 0 phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs) mask &= (phrase_freqs > 0)
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1])
FOR
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/term_dict.py<fim_prefix>import sys class TermMissingError(KeyError): def __init__(self, msg): super().__init__(msg) class TermDict: def __init__(self): self.term_to_ids = {} self.id_to_terms = {} def add_term(self, term): if term in se...
return self.term_to_ids[term]
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
return int(value)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
doc_ids = self.term_mat.rows term_ids = [self.term_dict.get_term_id(token) for token in tokens] return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids, phrase_freqs=phrase_freqs)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
rows = self.term_mat[key] doc_len = self.doc_lens[key] doc_id = key if doc_id < 0: doc_id += len(self) return _row_to_postings_row(doc_id, rows[0], doc_len, self.term_dict, se...
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
return self.posns.docfreq(self.term_dict.get_term_id(token))
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
return self.docfreq_cache[term_id]
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
term_id = self.term_dict.get_term_id(token) matches = np.zeros(len(self), dtype=int) slice_of_rows = None if self.term_mat.subset: slice_of_rows = self.term_mat.rows doc_ids, termfreqs = self.posns.termfreqs(term_id, ...
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
return self.termfreq_cache[term_id]
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
return int(value)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
np_doc_ids = convert_keys(doc_ids) term_posns = encoder.slice(self.encoded_term_posns[term_id], keys=np_doc_ids)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
TermMissingError: return phrase_freqs
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
ValueError: raise ValueError(error_message)
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
IndexError: raise IndexError("index out of bounds")
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
KeyError: encoded = self.encoded_term_posns[term_id] docfreq = np.uint64(encoder.keys_unique(encoded).size) self._maybe_cache_docfreq(term_id, docfreq) return docfreq
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
TermMissingError: return 0
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
KeyError: term_posns = self.encoded_term_posns[term_id] doc_ids, term_freqs = self._computed_term_freqs(term_posns) if self._is_cached(term_id): self.termfreq_cache[term_id] = (doc_ids, term_freqs) return doc_ids, term_freqs
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping. See this notebook for motivation: https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG """ import numpy as np impor...
KeyError: r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids] if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number): return [r_val[0]] return r_val
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take impo...
TermMissingError: return np.zeros(len(self), dtype=int)
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
ValueError: raise ValueError(error_message)
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/term_dict.py<fim_prefix>import sys class TermMissingError(KeyError): def __init__(self, msg): super().__init__(msg) class TermDict: def __init__(self): self.term_to_ids = {} self.id_to_terms = {} def add_term(self, term): if term in se...
KeyError: raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
listify(x): return x if isinstance(x, list) else [x]
METHOD
prefix_suffix_full_complete_current_block_with_evidence
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse...
checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message)
METHOD
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.f...
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
Validate a value against a type definition. Args: value: Any object or primitive value type_definition: The type definition to validate against Returns: Whether the value is valid for the type definition """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/utils.py<fim_prefix>import dataclasses import datetime import inspect import json import typing from typing import get_args, Literal import string import types def json_default(thing): try: return dataclasses.asdict(thing) except TypeError: pass if isinstance(...
Get a dictionary representation of the object """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, N...
Log a patched function invocation to the file system :param func_hash: A string representation of the function signature and input parameters :param example: :return: """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>import os from bitarray._bitarray import bitarray from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence class BloomFilterFileSystemDriver(IBloomFilterPersistence): """ This is a Filesystem implementation...
Load a bloom filter from the local filesystem. :return: A bloom filter object containing the state of unique function invocations """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/models/function_config.py<fim_prefix>from pydantic import BaseModel from typing import Dict, List from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_STUDENT_MODELS from tanuki.consta...
Update the function config with the finetuned response Args: response: The finetuned response """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
base types."""
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast import datetime import io import json from typing import List, Tuple, Dict, Union import logging from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \ NEGATIVE_EMBEDDABLE_ALIGNMEN...
Return the current model from the config file """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os from enum import Enum from typing import Literal, Union, Optional, Dict from appdirs import user_data_dir from tanuki.constants import * from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki....
Get an instance of the bloom filter persistence provider. Typically this will be a file system provider. :return: A persistence provider """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast import datetime import io import json from typing import List, Tuple, Dict, Union import logging from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \ NEGATIVE_EMBEDDABLE_ALIGNMEN...
Load the config file for a function hash """
BLOCK_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
self._is_subclass_of_generic(target_type, tuple): return target_type(instantiated_tuple)
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
hasattr(target_type, '__orig_bases__'): for base in target_type.__orig_bases__: if get_args(base): return base, get_args(base)
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
not isinstance(value, tuple): return False
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type): return target_type(instantiated_items)
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, N...
log_file_path not in self.buffers: self.buffers[log_file_path] = bytearray()
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.f...
func_hash not in self.initialized_functions: # initialise the initialized_functions dict self.initialized_functions[func_hash] = {"model": "", "examples": []}
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
self.is_base_type(target_type) or target_type is Any: # If the parsed data is a string and target type is str, return it directly if isinstance(data, str) and target_type is str: return data # If any, return the data directly if target_type is Any: ...
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)): key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any) instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in ...
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
origin == list: if not isinstance(value, list): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value)
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
origin == tuple: if not isinstance(value, tuple): return False item_type = args[0] if args else Any return all(self.check_type(v, item_type) for v in value)
IF
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
target_type(data)
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>import os from bitarray._bitarray import bitarray from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence class BloomFilterFileSystemDriver(IBloomFilterPersistence): """ This is a Filesystem implementation...
= os.path.join(self.log_directory, 'bloom_filter_state.bin')
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
= get_origin(target_type)
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/models/function_description.py<fim_prefix>import hashlib from dataclasses import dataclass from typing import Dict, Optional, Literal from tanuki.models.function_type import FunctionType from tanuki.utils import json_dumps @dataclass(frozen=True) class FunctionDescription: name: str...
str(h)
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast import datetime import io import json from typing import List, Tuple, Dict, Union import logging from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \ NEGATIVE_EMBEDDABLE_ALIGNMEN...
= {}
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
dict(instantiated_dict)
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, N...
= f"{log_file_path}.json"
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
value_type = args
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
= get_args(target_type)[0] if get_args(target_type) else Any
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
= item_types[0] if item_types else Any
STATEMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os from enum import Enum from typing import Literal, Union, Optional, Dict from appdirs import user_data_dir from tanuki.constants import * from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki....
If installed in a project that contains a git repo - place it in the same folder as the git repo
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
try to instantiate datetime
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, N...
remove teacher_models from the config
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.f...
initialise the initialized_functions dict
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json from typing import Any, Dict from tanuki.function_modeler import FunctionModeler from tanuki.language_models.llm_api_abc import LLM_API from tanuki.models.function_description import FunctionDescription from tanuki.models.f...
update the examples in the initialized_functions dict
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
If the target type is a built-in, attempt to instantiate and return
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
backwards compatibility with pydantic < 2
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
If none of the above, return the data as-is
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
check that all required arguments are in value and do type checking
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os from enum import Enum from typing import Literal, Union, Optional, Dict from appdirs import user_data_dir from tanuki.constants import * from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence from tanuki....
If installed as a library
LINE_COMMENT
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
return int(float(data))
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, N...
self.write(log_file_path, self.buffers[log_file_path], mode="a+b") # update buffers written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path] self.buffers[log_file_path].clear() self.buffer_rolling_size[log_file_path] = 0 ...
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
#temp_model = create_model('TempModel', **value) if isinstance(value, origin): return True #return isinstance(temp_model, origin) # check if value is dict if not isinstance(value, dict): return False...
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
return self.instantiate(data, arg)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
return int(float(data))
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
instantiated_item = self.instantiate(item, item_type)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
obj = origin(**value) return True
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
return target_type.model_validate(data)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
return target_type(**data)
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json from abc import abstractmethod from typing import Dict, Any, Literal from tanuki.bloom_filter import BloomFilter from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \ POSITIVE_FILE_EXTENSION, N...
self.ensure_persistence_location_exists()
TRY
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
return False
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
continue
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
(ValueError, TypeError): pass
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
Exception as e: print(e) return False
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
(ValueError, TypeError): pass
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
AttributeError as e: # backwards compatibility with pydantic < 2 return target_type.parse_obj(data)
CATCH
prefix_suffix_full_complete_current_block_with_evidence
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc from collections import defaultdict import collections import typing from collections import deque import dataclasses import inspect import json from dataclasses import is_dataclass from typing import get_origin, get_args, Any, Mapping, MutableMapping, O...
json.JSONDecodeError: return False
CATCH
prefix_suffix_full_complete_current_block_with_evidence
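Since the column summary claims 11 `block_type` classes and 7 `scenario` classes while the preview above only surfaces 8 block types and a single scenario, a frequency count over the full split is a natural sanity check. A sketch, again using the hypothetical dataset id from the loading example:

```python
# Minimal sketch: tally class frequencies to verify the claimed
# 11 block_type classes and 7 scenarios. Dataset id is hypothetical.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("your-org/fim-block-completion", split="train")

# Column access on a Dataset returns the full list of values.
print(Counter(ds["block_type"]))
print(Counter(ds["scenario"]))
```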