| inputs (string, 312 – 52k chars) | targets (string, 1 – 3.1k chars, nullable ⌀) | block_type (11 classes) | scenario (7 classes) |
|---|---|---|---|
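For orientation, here is a minimal sketch of how rows like the ones below could be loaded and filtered, assuming the table is exported as JSON lines with the header's columns (`inputs`, `targets`, `block_type`, `scenario`); the file name `fim_blocks.jsonl` is hypothetical.

```python
# A minimal sketch, assuming a JSON-lines export of this table; the file
# name "fim_blocks.jsonl" is hypothetical, not a confirmed artifact.
from datasets import load_dataset

ds = load_dataset("json", data_files="fim_blocks.jsonl", split="train")

# Keep only the TRY-block completions, using the block_type column.
try_rows = ds.filter(lambda row: row["block_type"] == "TRY")
print(len(try_rows), try_rows[0]["targets"])
```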
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1]) | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | if mm is None:
mm = "1" | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = term... | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx] | IF | prefix_suffix_full_complete_current_block_no_evidence |
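The phrase-boost target above is self-contained enough to run with concrete numbers; the sketch below reuses its exact lines to show that phrase scores are added only to documents whose term scores already matched. The score arrays are made-up illustrations.

```python
import numpy as np

# Hypothetical per-document scores: docs 1 and 3 matched on terms.
qf_scores = np.array([0.0, 1.2, 0.0, 2.5])
phrase_scores = [np.array([0.0, 0.4, 0.9, 0.1])]  # one phrase field

if len(phrase_scores) > 0:
    phrase_scores = np.sum(phrase_scores, axis=0)
    # Add where term_scores > 0
    term_match_idx = np.where(qf_scores)[0]
    qf_scores[term_match_idx] += phrase_scores[term_match_idx]

print(qf_scores)  # [0.  1.6 0.  2.6] -- doc 2's phrase score is discarded
```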
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid '... | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in te... | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | return np.array([], dtype=bool) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | max_scores = np.zeros(len(frame)) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | mask = mask & curr_mask | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | return matches | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | term_scores = [] | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | return self.posns.docfreq(self.term_dict.get_term_id(token)) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tup... | _, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tup... | msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | return [np.array([], dtype=np.uint32)] | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | return decs | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
... | FOR | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if n... | FOR | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.j... | FOR | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1 | FOR | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses... | FOR | prefix_suffix_full_complete_current_block_no_evidence |
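The `mm`-spec fragments scattered through the rows above (the whitespace normalization around `<`, the one-shot split, `checked_parse_int`, and the recursive tail call) assemble into roughly the following sketch. Only the conditional branch is grounded in the fragments; the plain-integer fallback is a simplification, since Solr's full `mm` grammar also accepts percentages and negative values.

```python
import re

def checked_parse_int(value, error_message):
    try:
        return int(value)
    except ValueError:
        raise ValueError(error_message)

def parse_min_should_match(num_clauses: int, spec: str) -> int:
    # Sketch reassembled from the fragments above; percentages and
    # negative values from Solr's full mm grammar are not handled here.
    result = num_clauses
    if '<' in spec:
        # we have conditional spec(s): normalize "3 < 2" to "3<2" first
        spec = re.sub(r'\s*<\s*', '<', spec)
        for s in spec.split():
            parts = s.split('<', 1)
            if len(parts) < 2:
                raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
            upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
            if num_clauses <= upper_bound:
                return result
            else:
                result = parse_min_should_match(num_clauses, parts[1])
        return result
    # Simplified fallback: treat the spec as a plain required-clause count.
    return min(num_clauses, checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer."))

print(parse_min_should_match(4, "2<2 5<3"))  # 2: with 4 clauses, the "2" requirement applies
```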
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=... | FOR | prefix_suffix_full_complete_current_block_no_evidence |
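The per-term loop above zeroes a `max_scores` array and then visits every query field, which suggests dismax-style combination: take each term's maximum score across fields, then sum those maxima over terms. That reading is an inference from the variable name, so the sketch below is conceptual; the frame/field plumbing, boosts, and explain strings are elided.

```python
import numpy as np

def dismax_scores(field_term_scores: dict) -> np.ndarray:
    # field_term_scores[field][term_posn] -> per-document score array
    fields = list(field_term_scores.values())
    num_terms = len(fields[0])
    num_docs = len(fields[0][0])
    qf_scores = np.zeros(num_docs)
    for term_posn in range(num_terms):
        max_scores = np.zeros(num_docs)
        for per_term in fields:
            # Best field wins for this term (dismax-style max).
            max_scores = np.maximum(max_scores, per_term[term_posn])
        qf_scores += max_scores
    return qf_scores

scores = {
    "title": [np.array([2.0, 0.0]), np.array([0.0, 1.0])],
    "body":  [np.array([1.0, 3.0]), np.array([0.5, 0.0])],
}
print(dismax_scores(scores))  # [2.5 4. ]
```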
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tup... | for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
... | FOR | prefix_suffix_full_complete_current_block_no_evidence |
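The bit loop above walks the low payload bits of each encoded word: the LSB field is a bitmask of positions within a block, and the MSB field selects the block, so a set bit decodes to `bit + msb * payload_lsb_bits`. Here is a toy decoder with illustrative 4-bit fields; searcharray's real key/MSB/LSB widths differ.

```python
# Toy decode of the roaring-ish payload scheme the loop above walks.
# The 4-bit LSB/MSB widths here are illustrative, not the real layout.
import numpy as np

payload_lsb_bits = 4
payload_msb_mask = np.uint64(0b1111_0000)

def decode_positions(encoded: np.ndarray) -> np.ndarray:
    msbs = (encoded & payload_msb_mask) >> np.uint64(payload_lsb_bits)
    positions = []
    for bit in range(payload_lsb_bits):
        mask = np.uint64(1 << bit)
        set_lsbs = (encoded & mask) != 0
        # Each set LSB is position `bit` within block `msb`.
        positions.append(bit + msbs[set_lsbs] * payload_lsb_bits)
    return np.sort(np.concatenate(positions))

# Block 2 with bits 0 and 3 set -> absolute positions 8 and 11.
print(decode_positions(np.array([0b0010_1001], dtype=np.uint64)))  # [ 8 11]
```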
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | for curr_mask in masks:
mask = mask & curr_mask | FOR | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0) | FOR | prefix_suffix_full_complete_current_block_no_evidence |
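Conceptually, that loop slides a window across the phrase: each term's matches are intersected with positions one past the surviving matches of the previous terms, and the count of the last surviving bigram is the phrase frequency. A pure-Python stand-in for the encoded postings that `bigram_freqs` actually consumes:

```python
# Illustrative only: positions[i] is term i's sorted positions in one
# document, standing in for the encoded roaring-ish postings.
def phrase_freq(positions: list) -> int:
    lhs = positions[0]
    for rhs in positions[1:]:
        # Keep rhs positions that directly follow a surviving lhs position.
        lhs = [p for p in rhs if p - 1 in set(lhs)]
        if not lhs:
            return 0
    # Only the count of the last surviving bigram matters.
    return len(lhs)

# "to be or not to be": "to be" occurs twice, "not to be" once.
posns = {"to": [0, 4], "be": [1, 5], "or": [2], "not": [3]}
print(phrase_freq([posns["to"], posns["be"]]))                # 2
print(phrase_freq([posns["not"], posns["to"], posns["be"]]))  # 1
```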
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1]) | FOR | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/term_dict.py<fim_prefix>import sys
class TermMissingError(KeyError):
def __init__(self, msg):
super().__init__(msg)
class TermDict:
def __init__(self):
self.term_to_ids = {}
self.id_to_terms = {}
def add_term(self, term):
if term in se... | try:
return self.term_to_ids[term] | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | try:
return int(value) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict... | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | try:
return self.posns.docfreq(self.term_dict.get_term_id(token)) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | try:
return self.docfreq_cache[term_id] | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
... | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | try:
return self.termfreq_cache[term_id] | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | try:
return int(value) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | except TermMissingError:
return phrase_freqs | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | except ValueError:
raise ValueError(error_message) | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | except IndexError:
raise IndexError("index out of bounds") | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | except TermMissingError:
return 0 | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
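Both `KeyError` handlers above follow the same compute-on-miss cache shape, with a conditional insert (`self._is_cached(term_id)`) guarding the write. A generic sketch of the pattern, with the encoder details replaced by callables:

```python
# Compute-on-miss cache with a conditional insert, as in the handlers above.
def cached_lookup(cache: dict, term_id: int, compute, should_cache):
    try:
        return cache[term_id]
    except KeyError:
        result = compute(term_id)
        if should_cache(term_id):  # mirrors self._is_cached(term_id)
            cache[term_id] = result
        return result
```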
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
impor... | except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
impo... | except TermMissingError:
return np.zeros(len(self), dtype=int) | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | except ValueError:
raise ValueError(error_message) | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/term_dict.py<fim_prefix>import sys
class TermMissingError(KeyError):
def __init__(self, msg):
super().__init__(msg)
class TermDict:
def __init__(self):
self.term_to_ids = {}
self.id_to_terms = {}
def add_term(self, term):
if term in se... | except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.") | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
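The TermDict fragments above (the class header, `get_term_id`, and the `TermMissingError` raise) assemble into roughly this minimal class. The truncated `add_term` body is an assumed completion that returns the existing id for known terms, which the visible `if term in se...` prefix suggests.

```python
class TermMissingError(KeyError):
    def __init__(self, msg):
        super().__init__(msg)

class TermDict:
    def __init__(self):
        self.term_to_ids = {}
        self.id_to_terms = {}

    def add_term(self, term):
        if term in self.term_to_ids:  # assumed completion of "if term in se..."
            return self.term_to_ids[term]
        term_id = len(self.term_to_ids)
        self.term_to_ids[term] = term_id
        self.id_to_terms[term_id] = term
        return term_id

    def get_term_id(self, term):
        try:
            return self.term_to_ids[term]
        except KeyError:
            raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")

td = TermDict()
assert td.add_term("cat") == td.get_term_id("cat")
```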
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | def listify(x):
return x if isinstance(x, list) else [x] | METHOD | prefix_suffix_full_complete_current_block_no_evidence |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse... | def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message) | METHOD | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.f... | """
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | """
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/utils.py<fim_prefix>import dataclasses
import datetime
import inspect
import json
import typing
from typing import get_args, Literal
import string
import types
def json_default(thing):
try:
return dataclasses.asdict(thing)
except TypeError:
pass
if isinstance(... | """
Get a dictionary representation of the object
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, N... | """
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>import os
from bitarray._bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilterFileSystemDriver(IBloomFilterPersistence):
"""
This is a Filesystem implementation... | """
Load a bloom filter from the local filesystem.
:return: A bloom filter object containing the state of unique function invocations
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/models/function_config.py<fim_prefix>from pydantic import BaseModel
from typing import Dict, List
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.consta... | """
Update the function config with the finetuned response
Args:
response: The finetuned response
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | """Validate base types.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast
import datetime
import io
import json
from typing import List, Tuple, Dict, Union
import logging
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMEN... | """
Return the current model from the config file
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os
from enum import Enum
from typing import Literal, Union, Optional, Dict
from appdirs import user_data_dir
from tanuki.constants import *
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.... | """
Get an instance of the bloom filter persistence provider. Typically this will be a file system provider.
:return: A persistence provider
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast
import datetime
import io
import json
from typing import List, Tuple, Dict, Union
import logging
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMEN... | """
Load the config file for a function hash
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple) | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base) | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if not isinstance(value, tuple):
return False | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items) | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, N... | if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray() | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.f... | if func_hash not in self.initialized_functions:
# initialise the initialized_functions dict
self.initialized_functions[func_hash] = {"model": "", "examples": []} | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
... | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
... | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value) | IF | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value) | IF | prefix_suffix_full_complete_current_block_no_evidence |
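The list and tuple branches above share one shape: reject on the wrong container type, take the first type argument (or `Any`), and recurse over the items. Below is a self-contained sketch with the surrounding Validator class elided; note that the tuple branch, like the fragment, checks every element against the first argument rather than matching positionally.

```python
from typing import Any, get_args, get_origin

def check_type(value, type_definition) -> bool:
    origin = get_origin(type_definition)
    args = get_args(type_definition)
    if origin == list:
        if not isinstance(value, list):
            return False
        item_type = args[0] if args else Any
        return all(check_type(v, item_type) for v in value)
    if origin == tuple:
        if not isinstance(value, tuple):
            return False
        item_type = args[0] if args else Any
        return all(check_type(v, item_type) for v in value)
    if type_definition is Any:
        return True
    return isinstance(value, type_definition)

print(check_type([1, 2, 3], list[int]))     # True
print(check_type([1, "x"], list[int]))      # False
print(check_type((1, 2), tuple[int, ...]))  # True: all items checked against args[0]
```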
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | return target_type(data) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>import os
from bitarray._bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilterFileSystemDriver(IBloomFilterPersistence):
"""
This is a Filesystem implementation... | bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin') | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | origin = get_origin(target_type) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/models/function_description.py<fim_prefix>import hashlib
from dataclasses import dataclass
from typing import Dict, Optional, Literal
from tanuki.models.function_type import FunctionType
from tanuki.utils import json_dumps
@dataclass(frozen=True)
class FunctionDescription:
name: str... | return str(h) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/function_modeler.py<fim_prefix>import ast
import datetime
import io
import json
from typing import List, Tuple, Dict, Union
import logging
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMEN... | self.function_configs = {} | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | return dict(instantiated_dict) | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, N... | config_path = f"{log_file_path}.json" | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | key_type, value_type = args | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | item_type = get_args(target_type)[0] if get_args(target_type) else Any | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | item_type = item_types[0] if item_types else Any | STATEMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os
from enum import Enum
from typing import Literal, Union, Optional, Dict
from appdirs import user_data_dir
from tanuki.constants import *
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.... | # If installed in a project that contains a git repo - place it in the same folder as the git repo | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | # try to instantiate datetime | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, N... | # remove teacher_models from the config | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.f... | # initialise the initialized_functions dict | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.f... | # update the examples in the initialized_functions dict | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | # If the target type is a built-in, attempt to instantiate and return | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | # backwards compatibility with pydantic < 2 | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | # If none of the above, return the data as-is | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | # check that all required arguments are in value and do type checking | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/filesystem_buffered_logger.py<fim_prefix>import os
from enum import Enum
from typing import Literal, Union, Optional, Dict
from appdirs import user_data_dir
from tanuki.constants import *
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.... | # If installed as a library | LINE_COMMENT | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
return int(float(data)) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, N... | try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0... | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return F... | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
return self.instantiate(data, arg) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
return int(float(data)) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
instantiated_item = self.instantiate(item, item_type) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
obj = origin(**value)
return True | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
return target_type.model_validate(data) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | try:
return target_type(**data) | TRY | prefix_suffix_full_complete_current_block_no_evidence |
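The two `try` bodies above (`target_type.model_validate(data)` and `target_type(**data)`), together with the `# backwards compatibility with pydantic < 2` comment earlier, suggest a version fallback. A hedged sketch of that branch; how the real `instantiate` chains these cases is assumed.

```python
from pydantic import BaseModel

def instantiate_model(data: dict, target_type):
    try:
        return target_type.model_validate(data)  # pydantic >= 2
    except AttributeError:
        return target_type(**data)  # backwards compatibility with pydantic < 2

class User(BaseModel):
    name: str
    age: int

print(instantiate_model({"name": "Ada", "age": 36}, User))
```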
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, N... | try:
self.ensure_persistence_location_exists() | TRY | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | except:
return False | CATCH | prefix_suffix_full_complete_current_block_no_evidence |
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, O... | except:
continue | CATCH | prefix_suffix_full_complete_current_block_no_evidence |