language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PrefectHQ__prefect | tests/_internal/pydantic/test_validated_func.py | {
"start": 9235,
"end": 11657
} | class ____:
"""Test edge cases and corner scenarios."""
def test_no_parameters(self):
def func():
return "no params"
vf = ValidatedFunction(func)
result = vf.validate_call_args((), {})
assert result == {}
def test_only_defaults(self):
def func(a=1, b=2):
return a + b
vf = ValidatedFunction(func)
result = vf.validate_call_args((), {})
assert result == {"a": 1, "b": 2}
def test_empty_var_args(self):
def func(*args):
return args
vf = ValidatedFunction(func)
result = vf.validate_call_args((), {})
assert result == {"args": []}
def test_empty_var_kwargs(self):
def func(**kwargs):
return kwargs
vf = ValidatedFunction(func)
result = vf.validate_call_args((), {})
assert result == {"kwargs": {}}
def test_reserved_parameter_name_v__args(self):
"""Test that using reserved parameter name v__args raises ValueError."""
def func(v__args):
return v__args
with pytest.raises(
ValueError, match="Function parameters conflict with internal field names"
):
ValidatedFunction(func)
def test_reserved_parameter_name_v__kwargs(self):
"""Test that using reserved parameter name v__kwargs raises ValueError."""
def func(v__kwargs):
return v__kwargs
with pytest.raises(
ValueError, match="Function parameters conflict with internal field names"
):
ValidatedFunction(func)
def test_reserved_parameter_name_v__positional_only(self):
"""Test that using reserved parameter name v__positional_only raises ValueError."""
def func(v__positional_only):
return v__positional_only
with pytest.raises(
ValueError, match="Function parameters conflict with internal field names"
):
ValidatedFunction(func)
def test_reserved_parameter_name_v__duplicate_kwargs(self):
"""Test that using reserved parameter name v__duplicate_kwargs raises ValueError."""
def func(v__duplicate_kwargs):
return v__duplicate_kwargs
with pytest.raises(
ValueError, match="Function parameters conflict with internal field names"
):
ValidatedFunction(func)
| TestEdgeCases |
python | getsentry__sentry | src/sentry/integrations/jira_server/search.py | {
"start": 800,
"end": 3892
} | class ____(IntegrationEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
provider = IntegrationProviderSlug.JIRA_SERVER.value
def _get_integration(self, organization, integration_id) -> Integration:
return Integration.objects.get(
organizationintegration__organization_id=organization.id,
id=integration_id,
provider=self.provider,
)
def get(
self, request: Request, organization: RpcOrganization, integration_id: int, **kwds: Any
) -> Response:
try:
integration = self._get_integration(organization, integration_id)
except Integration.DoesNotExist:
return Response(status=404)
installation = integration.get_installation(organization.id)
assert isinstance(installation, JiraServerIntegration), installation
jira_client = installation.get_client()
field = request.GET.get("field")
query = request.GET.get("query")
if field is None:
return Response({"detail": "field is a required parameter"}, status=400)
if not query:
return Response({"detail": "query is a required parameter"}, status=400)
if field in ("externalIssue", "parent"):
if not query:
return Response([])
try:
resp = installation.search_issues(query)
except IntegrationError as e:
return Response({"detail": str(e)}, status=400)
return Response(
[
{"label": "({}) {}".format(i["key"], i["fields"]["summary"]), "value": i["key"]}
for i in resp.get("issues", [])
]
)
if field in ("assignee", "reporter"):
try:
response = jira_client.search_users_for_project(
request.GET.get("project", ""), query
)
except (ApiUnauthorized, ApiError):
return Response({"detail": "Unable to fetch users from Jira"}, status=400)
user_tuples = filter(
None, [build_user_choice(user, jira_client.user_id_field()) for user in response]
)
users = [{"value": user_id, "label": display} for user_id, display in user_tuples]
return Response(users)
try:
response = jira_client.get_field_autocomplete(name=field, value=query)
except (ApiUnauthorized, ApiError):
return Response(
{"detail": f"Unable to fetch autocomplete for {field} from Jira"},
status=400,
)
choices = [
{
"value": result["value"],
# Jira's response will highlight the matching substring in the name using HTML formatting.
"label": BeautifulSoup(result["displayName"], "html.parser").get_text(),
}
for result in response["results"]
]
return Response(choices)
| JiraServerSearchEndpoint |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 106519,
"end": 107015
} | class ____(InstallError):
"""Raised when package is still needed by another on uninstall."""
def __init__(self, spec, dependents):
spec_fmt = spack.spec.DEFAULT_FORMAT + " /{hash:7}"
dep_fmt = "{name}{@versions} /{hash:7}"
super().__init__(
f"Cannot uninstall {spec.format(spec_fmt)}, "
f"needed by {[dep.format(dep_fmt) for dep in dependents]}"
)
self.spec = spec
self.dependents = dependents
| PackageStillNeededError |
python | kamyu104__LeetCode-Solutions | Python/most-expensive-item-that-can-not-be-bought.py | {
"start": 504,
"end": 978
} | class ____(object):
def mostExpensiveItem(self, primeOne, primeTwo):
"""
:type primeOne: int
:type primeTwo: int
:rtype: int
"""
dp = [False]*max(primeOne, primeTwo)
dp[0] = True
result = 1
for i in xrange(2, primeOne*primeTwo):
dp[i%len(dp)] = dp[(i-primeOne)%len(dp)] or dp[(i-primeTwo)%len(dp)]
if not dp[i%len(dp)]:
result = i
return result
| Solution2 |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 26118,
"end": 27478
} | class ____:
def test_circular_references(self):
circular_dict: dict[str, any] = {"key": "value", "password": "secret_password"}
circular_dict["self_ref"] = circular_dict
secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(secrets_masker)
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker", return_value=secrets_masker
):
redacted_data = redact(circular_dict)
assert redacted_data["key"] == "value"
assert redacted_data["password"] == "***"
assert isinstance(redacted_data["self_ref"], dict)
def test_regex_special_chars_in_secrets(self):
regex_secrets = ["password+with*chars", "token.with[special]chars", "api_key^that$needs(escaping)"]
secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(secrets_masker)
for secret in regex_secrets:
secrets_masker.add_mask(secret)
test_string = f"Contains {regex_secrets[0]} and {regex_secrets[1]} and {regex_secrets[2]}"
redacted = secrets_masker.redact(test_string)
for secret in regex_secrets:
assert secret not in redacted
assert redacted.count("***") == 3
assert redacted.startswith("Contains ")
assert " and " in redacted
| TestEdgeCases |
python | huggingface__transformers | src/transformers/data/processors/xnli.py | {
"start": 889,
"end": 3481
} | class ____(DataProcessor):
"""
Processor for the XNLI dataset. Adapted from
https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
"""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_train_examples(self, data_dir):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f"train-{i}"
text_a = line[0]
text_b = line[1]
label = "contradiction" if line[2] == "contradictory" else line[2]
if not isinstance(text_a, str):
raise TypeError(f"Training input {text_a} is not a string")
if not isinstance(text_b, str):
raise TypeError(f"Training input {text_b} is not a string")
if not isinstance(label, str):
raise TypeError(f"Training label {label} is not a string")
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
language = line[0]
if language != self.language:
continue
guid = f"test-{i}"
text_a = line[6]
text_b = line[7]
label = line[1]
if not isinstance(text_a, str):
raise TypeError(f"Training input {text_a} is not a string")
if not isinstance(text_b, str):
raise TypeError(f"Training input {text_b} is not a string")
if not isinstance(label, str):
raise TypeError(f"Training label {label} is not a string")
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
xnli_processors = {
"xnli": XnliProcessor,
}
xnli_output_modes = {
"xnli": "classification",
}
xnli_tasks_num_labels = {
"xnli": 3,
}
| XnliProcessor |
python | RaRe-Technologies__gensim | gensim/test/test_doc2vec.py | {
"start": 1757,
"end": 32720
} | class ____(unittest.TestCase):
def test_persistence(self):
"""Test storing/loading the entire model."""
tmpf = get_tmpfile('gensim_doc2vec.tst')
model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
model.save(tmpf)
self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
def test_persistence_fromfile(self):
"""Test storing/loading the entire model."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
tmpf = get_tmpfile('gensim_doc2vec.tst')
model = doc2vec.Doc2Vec(corpus_file=corpus_file, min_count=1)
model.save(tmpf)
self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
def test_persistence_word2vec_format(self):
"""Test storing the entire model in word2vec format."""
model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
# test saving both document and word embedding
test_doc_word = get_tmpfile('gensim_doc2vec.dw')
model.save_word2vec_format(test_doc_word, doctag_vec=True, word_vec=True, binary=False)
binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc_word, binary=False)
self.assertEqual(len(model.wv) + len(model.dv), len(binary_model_dv))
# test saving document embedding only
test_doc = get_tmpfile('gensim_doc2vec.d')
model.save_word2vec_format(test_doc, doctag_vec=True, word_vec=False, binary=True)
binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc, binary=True)
self.assertEqual(len(model.dv), len(binary_model_dv))
# test saving word embedding only
test_word = get_tmpfile('gensim_doc2vec.w')
model.save_word2vec_format(test_word, doctag_vec=False, word_vec=True, binary=True)
binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_word, binary=True)
self.assertEqual(len(model.wv), len(binary_model_dv))
def obsolete_testLoadOldModel(self):
"""Test loading an old doc2vec model from indeterminate version"""
model_file = 'doc2vec_old' # which version?!?
model = doc2vec.Doc2Vec.load(datapath(model_file))
self.assertTrue(model.wv.vectors.shape == (3955, 100))
self.assertTrue(len(model.wv) == 3955)
self.assertTrue(len(model.wv.index_to_key) == 3955)
self.assertIsNone(model.corpus_total_words)
self.assertTrue(model.syn1neg.shape == (len(model.wv), model.vector_size))
self.assertTrue(model.wv.vectors_lockf.shape == (3955, ))
self.assertTrue(model.cum_table.shape == (3955, ))
self.assertTrue(model.dv.vectors.shape == (300, 100))
self.assertTrue(model.dv.vectors_lockf.shape == (300, ))
self.assertTrue(len(model.dv) == 300)
self.model_sanity(model)
def obsolete_testLoadOldModelSeparates(self):
"""Test loading an old doc2vec model from indeterminate version"""
# Model stored in multiple files
model_file = 'doc2vec_old_sep'
model = doc2vec.Doc2Vec.load(datapath(model_file))
self.assertTrue(model.wv.vectors.shape == (3955, 100))
self.assertTrue(len(model.wv) == 3955)
self.assertTrue(len(model.wv.index_to_key) == 3955)
self.assertIsNone(model.corpus_total_words)
self.assertTrue(model.syn1neg.shape == (len(model.wv), model.vector_size))
self.assertTrue(model.wv.vectors_lockf.shape == (3955, ))
self.assertTrue(model.cum_table.shape == (3955, ))
self.assertTrue(model.dv.vectors.shape == (300, 100))
self.assertTrue(model.dv.vectors_lockf.shape == (300, ))
self.assertTrue(len(model.dv) == 300)
self.model_sanity(model)
def obsolete_test_load_old_models_pre_1_0(self):
"""Test loading pre-1.0 models"""
model_file = 'd2v-lee-v0.13.0'
model = doc2vec.Doc2Vec.load(datapath(model_file))
self.model_sanity(model)
old_versions = [
'0.12.0', '0.12.1', '0.12.2', '0.12.3', '0.12.4',
'0.13.0', '0.13.1', '0.13.2', '0.13.3', '0.13.4',
]
for old_version in old_versions:
self._check_old_version(old_version)
def obsolete_test_load_old_models_1_x(self):
"""Test loading 1.x models"""
old_versions = [
'1.0.0', '1.0.1',
]
for old_version in old_versions:
self._check_old_version(old_version)
def obsolete_test_load_old_models_2_x(self):
"""Test loading 2.x models"""
old_versions = [
'2.0.0', '2.1.0', '2.2.0', '2.3.0',
]
for old_version in old_versions:
self._check_old_version(old_version)
def obsolete_test_load_old_models_pre_3_3(self):
"""Test loading 3.x models"""
old_versions = [
'3.2.0', '3.1.0', '3.0.0'
]
for old_version in old_versions:
self._check_old_version(old_version)
def obsolete_test_load_old_models_post_3_2(self):
"""Test loading 3.x models"""
old_versions = [
'3.4.0', '3.3.0',
]
for old_version in old_versions:
self._check_old_version(old_version)
def _check_old_version(self, old_version):
logging.info("TESTING LOAD of %s Doc2Vec MODEL", old_version)
saved_models_dir = datapath('old_d2v_models/d2v_{}.mdl')
model = doc2vec.Doc2Vec.load(saved_models_dir.format(old_version))
self.assertTrue(len(model.wv) == 3)
self.assertIsNone(model.corpus_total_words)
self.assertTrue(model.wv.vectors.shape == (3, 4))
self.assertTrue(model.dv.vectors.shape == (2, 4))
self.assertTrue(len(model.dv) == 2)
# check if inferring vectors for new documents and similarity search works.
doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
sims_to_infer = model.dv.most_similar([doc0_inferred], topn=len(model.dv))
self.assertTrue(sims_to_infer)
# check if inferring vectors and similarity search works after saving and loading back the model
tmpf = get_tmpfile('gensim_doc2vec.tst')
model.save(tmpf)
loaded_model = doc2vec.Doc2Vec.load(tmpf)
doc0_inferred = loaded_model.infer_vector(list(DocsLeeCorpus())[0].words)
sims_to_infer = loaded_model.dv.most_similar([doc0_inferred], topn=len(loaded_model.dv))
self.assertTrue(sims_to_infer)
def test_doc2vec_train_parameters(self):
model = doc2vec.Doc2Vec(vector_size=50)
model.build_vocab(corpus_iterable=list_corpus)
self.assertRaises(TypeError, model.train, corpus_file=11111)
self.assertRaises(TypeError, model.train, corpus_iterable=11111)
self.assertRaises(TypeError, model.train, corpus_iterable=sentences, corpus_file='test')
self.assertRaises(TypeError, model.train, corpus_iterable=None, corpus_file=None)
self.assertRaises(TypeError, model.train, corpus_file=sentences)
@unittest.skipIf(os.name == 'nt', "See another test for Windows below")
def test_get_offsets_and_start_doctags(self):
# Each line takes 6 bytes (including '\n' character)
lines = ['line1\n', 'line2\n', 'line3\n', 'line4\n', 'line5\n']
tmpf = get_tmpfile('gensim_doc2vec.tst')
with utils.open(tmpf, 'wb', encoding='utf8') as fout:
for line in lines:
fout.write(utils.any2unicode(line))
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 1)
self.assertEqual(offsets, [0])
self.assertEqual(start_doctags, [0])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 2)
self.assertEqual(offsets, [0, 12])
self.assertEqual(start_doctags, [0, 2])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 3)
self.assertEqual(offsets, [0, 6, 18])
self.assertEqual(start_doctags, [0, 1, 3])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 4)
self.assertEqual(offsets, [0, 6, 12, 18])
self.assertEqual(start_doctags, [0, 1, 2, 3])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 5)
self.assertEqual(offsets, [0, 6, 12, 18, 24])
self.assertEqual(start_doctags, [0, 1, 2, 3, 4])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 6)
self.assertEqual(offsets, [0, 0, 6, 12, 18, 24])
self.assertEqual(start_doctags, [0, 0, 1, 2, 3, 4])
@unittest.skipIf(os.name != 'nt', "See another test for posix above")
def test_get_offsets_and_start_doctags_win(self):
# Each line takes 7 bytes (including '\n' character which is actually '\r\n' on Windows)
lines = ['line1\n', 'line2\n', 'line3\n', 'line4\n', 'line5\n']
tmpf = get_tmpfile('gensim_doc2vec.tst')
with utils.open(tmpf, 'wb', encoding='utf8') as fout:
for line in lines:
fout.write(utils.any2unicode(line))
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 1)
self.assertEqual(offsets, [0])
self.assertEqual(start_doctags, [0])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 2)
self.assertEqual(offsets, [0, 14])
self.assertEqual(start_doctags, [0, 2])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 3)
self.assertEqual(offsets, [0, 7, 21])
self.assertEqual(start_doctags, [0, 1, 3])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 4)
self.assertEqual(offsets, [0, 7, 14, 21])
self.assertEqual(start_doctags, [0, 1, 2, 3])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 5)
self.assertEqual(offsets, [0, 7, 14, 21, 28])
self.assertEqual(start_doctags, [0, 1, 2, 3, 4])
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 6)
self.assertEqual(offsets, [0, 0, 7, 14, 14, 21])
self.assertEqual(start_doctags, [0, 0, 1, 2, 2, 3])
def test_cython_linesentence_readline_after_getting_offsets(self):
lines = ['line1\n', 'line2\n', 'line3\n', 'line4\n', 'line5\n']
tmpf = get_tmpfile('gensim_doc2vec.tst')
with utils.open(tmpf, 'wb', encoding='utf8') as fout:
for line in lines:
fout.write(utils.any2unicode(line))
from gensim.models.word2vec_corpusfile import CythonLineSentence
offsets, start_doctags = doc2vec.Doc2Vec._get_offsets_and_start_doctags_for_corpusfile(tmpf, 5)
for offset, line in zip(offsets, lines):
ls = CythonLineSentence(tmpf, offset)
sentence = ls.read_sentence()
self.assertEqual(len(sentence), 1)
self.assertEqual(sentence[0], utils.any2utf8(line.strip()))
def test_unicode_in_doctag(self):
"""Test storing document vectors of a model with unicode titles."""
model = doc2vec.Doc2Vec(DocsLeeCorpus(unicode_tags=True), min_count=1)
tmpf = get_tmpfile('gensim_doc2vec.tst')
try:
model.save_word2vec_format(tmpf, doctag_vec=True, word_vec=True, binary=True)
except UnicodeEncodeError:
self.fail('Failed storing unicode title.')
def test_load_mmap(self):
"""Test storing/loading the entire model."""
model = doc2vec.Doc2Vec(sentences, min_count=1)
tmpf = get_tmpfile('gensim_doc2vec.tst')
# test storing the internal arrays into separate files
model.save(tmpf, sep_limit=0)
self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
# make sure mmaping the arrays back works, too
self.models_equal(model, doc2vec.Doc2Vec.load(tmpf, mmap='r'))
def test_int_doctags(self):
"""Test doc2vec doctag alternatives"""
corpus = DocsLeeCorpus()
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertEqual(len(model.dv.vectors), 300)
self.assertEqual(model.dv[0].shape, (100,))
self.assertEqual(model.dv[np.int64(0)].shape, (100,))
self.assertRaises(KeyError, model.__getitem__, '_*0')
def test_missing_string_doctag(self):
"""Test doc2vec doctag alternatives"""
corpus = list(DocsLeeCorpus(True))
# force duplicated tags
corpus = corpus[0:10] + corpus
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertRaises(KeyError, model.dv.__getitem__, 'not_a_tag')
def test_string_doctags(self):
"""Test doc2vec doctag alternatives"""
corpus = list(DocsLeeCorpus(True))
# force duplicated tags
corpus = corpus[0:10] + corpus
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertEqual(len(model.dv.vectors), 300)
self.assertEqual(model.dv[0].shape, (100,))
self.assertEqual(model.dv['_*0'].shape, (100,))
self.assertTrue(all(model.dv['_*0'] == model.dv[0]))
self.assertTrue(max(model.dv.key_to_index.values()) < len(model.dv.index_to_key))
self.assertLess(
max(model.dv.get_index(str_key) for str_key in model.dv.key_to_index.keys()),
len(model.dv.vectors)
)
# verify dv.most_similar() returns string doctags rather than indexes
self.assertEqual(model.dv.index_to_key[0], model.dv.most_similar([model.dv[0]])[0][0])
def test_empty_errors(self):
# no input => "RuntimeError: you must first build vocabulary before training the model"
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, [])
# input not empty, but rather completely filtered out
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000)
def test_similarity_unseen_docs(self):
"""Test similarity of out of training sentences"""
rome_words = ['rome', 'italy']
car_words = ['car']
corpus = list(DocsLeeCorpus(True))
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertTrue(
model.similarity_unseen_docs(rome_words, rome_words)
> model.similarity_unseen_docs(rome_words, car_words)
)
def model_sanity(self, model, keep_training=True):
"""Any non-trivial model on DocsLeeCorpus can pass these sanity checks"""
fire1 = 0 # doc 0 sydney fires
fire2 = np.int64(8) # doc 8 sydney fires
alt1 = 29 # doc 29 palestine
# inferred vector should be top10 close to bulk-trained one
doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
sims_to_infer = model.dv.most_similar([doc0_inferred], topn=len(model.dv))
sims_ids = [docid for docid, sim in sims_to_infer]
self.assertTrue(fire1 in sims_ids, "{0} not found in {1}".format(fire1, sims_to_infer))
f_rank = sims_ids.index(fire1)
self.assertLess(f_rank, 10)
# fire2 should be top30 close to fire1
sims = model.dv.most_similar(fire1, topn=len(model.dv))
f2_rank = [docid for docid, sim in sims].index(fire2)
self.assertLess(f2_rank, 30)
# same sims should appear in lookup by vec as by index
doc0_vec = model.dv[fire1]
sims2 = model.dv.most_similar(positive=[doc0_vec], topn=21)
sims2 = [(id, sim) for id, sim in sims2 if id != fire1] # ignore the doc itself
sims = sims[:20]
self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0]) # same doc ids
self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1])) # close-enough dists
# sim results should be in clip range if given
clip_sims = \
model.dv.most_similar(fire1, clip_start=len(model.dv) // 2, clip_end=len(model.dv) * 2 // 3)
sims_doc_id = [docid for docid, sim in clip_sims]
for s_id in sims_doc_id:
self.assertTrue(len(model.dv) // 2 <= s_id <= len(model.dv) * 2 // 3)
# fire docs should be closer than fire-alt
self.assertLess(model.dv.similarity(fire1, alt1), model.dv.similarity(fire1, fire2))
self.assertLess(model.dv.similarity(fire2, alt1), model.dv.similarity(fire1, fire2))
# alt doc should be out-of-place among fire news
self.assertEqual(model.dv.doesnt_match([fire1, alt1, fire2]), alt1)
# keep training after save
if keep_training:
tmpf = get_tmpfile('gensim_doc2vec_resave.tst')
model.save(tmpf)
loaded = doc2vec.Doc2Vec.load(tmpf)
loaded.train(corpus_iterable=sentences, total_examples=loaded.corpus_count, epochs=loaded.epochs)
def test_training(self):
"""Test doc2vec training."""
corpus = DocsLeeCorpus()
model = doc2vec.Doc2Vec(vector_size=100, min_count=2, epochs=20, workers=1)
model.build_vocab(corpus)
self.assertEqual(model.dv.vectors.shape, (300, 100))
model.train(corpus, total_examples=model.corpus_count, epochs=model.epochs)
self.model_sanity(model)
# build vocab and train in one step; must be the same as above
model2 = doc2vec.Doc2Vec(corpus, vector_size=100, min_count=2, epochs=20, workers=1)
self.models_equal(model, model2)
def test_training_fromfile(self):
"""Test doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(vector_size=100, min_count=2, epochs=20, workers=1)
model.build_vocab(corpus_file=corpus_file)
self.assertEqual(model.dv.vectors.shape, (300, 100))
model.train(corpus_file=corpus_file, total_words=model.corpus_total_words, epochs=model.epochs)
self.model_sanity(model)
model = doc2vec.Doc2Vec(corpus_file=corpus_file, vector_size=100, min_count=2, epochs=20, workers=1)
self.model_sanity(model)
def test_dbow_hs(self):
"""Test DBOW doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, epochs=20)
self.model_sanity(model)
def test_dbow_hs_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(corpus_file=corpus_file, dm=0, hs=1, negative=0, min_count=2, epochs=20)
self.model_sanity(model)
def test_dmm_hs(self):
"""Test DM/mean doc2vec training."""
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=1, vector_size=24, window=4,
hs=1, negative=0, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmm_hs_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=1, vector_size=24, window=4,
hs=1, negative=0, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dms_hs(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=0, vector_size=24, window=4, hs=1,
negative=0, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dms_hs_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=0, vector_size=24, window=4, hs=1,
negative=0, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmc_hs(self):
"""Test DM/concatenate doc2vec training."""
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_concat=1, vector_size=24, window=4,
hs=1, negative=0, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmc_hs_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_concat=1, vector_size=24, window=4,
hs=1, negative=0, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dbow_neg(self):
"""Test DBOW doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, vector_size=16, dm=0, hs=0, negative=5, min_count=2, epochs=40)
self.model_sanity(model)
def test_dbow_neg_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(list_corpus, vector_size=16, dm=0, hs=0, negative=5, min_count=2, epochs=40)
self.model_sanity(model)
def test_dmm_neg(self):
"""Test DM/mean doc2vec training."""
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=1, vector_size=24, window=4, hs=0,
negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmm_neg_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=1, vector_size=24, window=4, hs=0,
negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dms_neg(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=0, vector_size=24, window=4, hs=0,
negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dms_neg_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_mean=0, vector_size=24, window=4, hs=0,
negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmc_neg(self):
"""Test DM/concatenate doc2vec training."""
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_concat=1, vector_size=24, window=4, hs=0,
negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmc_neg_fromfile(self):
"""Test DBOW doc2vec training."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
list_corpus, dm=1, dm_concat=1, vector_size=24, window=4, hs=0,
negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmm_fixedwindowsize(self):
"""Test DMM doc2vec training with fixed window size."""
model = doc2vec.Doc2Vec(
list_corpus, vector_size=24,
dm=1, dm_mean=1, window=4, shrink_windows=False,
hs=0, negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dmm_fixedwindowsize_fromfile(self):
"""Test DMM doc2vec training with fixed window size, from file."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
corpus_file=corpus_file, vector_size=24,
dm=1, dm_mean=1, window=4, shrink_windows=False,
hs=0, negative=10, alpha=0.05, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dbow_fixedwindowsize(self):
"""Test DBOW doc2vec training with fixed window size."""
model = doc2vec.Doc2Vec(
list_corpus, vector_size=16, shrink_windows=False,
dm=0, hs=0, negative=5, min_count=2, epochs=20
)
self.model_sanity(model)
def test_dbow_fixedwindowsize_fromfile(self):
"""Test DBOW doc2vec training with fixed window size, from file."""
with temporary_file(get_tmpfile('gensim_doc2vec.tst')) as corpus_file:
save_lee_corpus_as_line_sentence(corpus_file)
model = doc2vec.Doc2Vec(
corpus_file=corpus_file, vector_size=16, shrink_windows=False,
dm=0, hs=0, negative=5, min_count=2, epochs=20
)
self.model_sanity(model)
def test_parallel(self):
"""Test doc2vec parallel training with more than default 3 threads."""
# repeat the ~300 doc (~60000 word) Lee corpus to get 6000 docs (~1.2M words)
corpus = utils.RepeatCorpus(DocsLeeCorpus(), 6000)
# use smaller batches-to-workers for more contention
model = doc2vec.Doc2Vec(corpus, workers=6, batch_words=5000)
self.model_sanity(model)
def test_deterministic_hs(self):
"""Test doc2vec results identical with identical RNG seed."""
# hs
model = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_neg(self):
"""Test doc2vec results identical with identical RNG seed."""
# neg
model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_dmc(self):
"""Test doc2vec results identical with identical RNG seed."""
# bigger, dmc
model = doc2vec.Doc2Vec(
DocsLeeCorpus(), dm=1, dm_concat=1, vector_size=24,
window=4, hs=1, negative=3, seed=42, workers=1
)
model2 = doc2vec.Doc2Vec(
DocsLeeCorpus(), dm=1, dm_concat=1, vector_size=24,
window=4, hs=1, negative=3, seed=42, workers=1
)
self.models_equal(model, model2)
def test_mixed_tag_types(self):
"""Ensure alternating int/string tags don't share indexes in vectors"""
mixed_tag_corpus = [doc2vec.TaggedDocument(words, [i, words[0]]) for i, words in enumerate(raw_sentences)]
model = doc2vec.Doc2Vec()
model.build_vocab(mixed_tag_corpus)
expected_length = len(sentences) + len(model.dv.key_to_index) # 9 sentences, 7 unique first tokens
self.assertEqual(len(model.dv.vectors), expected_length)
# TODO: test saving in word2vec format
def models_equal(self, model, model2):
# check words/hidden-weights
self.assertEqual(len(model.wv), len(model2.wv))
self.assertTrue(np.allclose(model.wv.vectors, model2.wv.vectors))
if model.hs:
self.assertTrue(np.allclose(model.syn1, model2.syn1))
if model.negative:
self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
# check docvecs
self.assertEqual(len(model.dv), len(model2.dv))
self.assertEqual(len(model.dv.index_to_key), len(model2.dv.index_to_key))
def test_word_vec_non_writeable(self):
model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
vector = model['says']
with self.assertRaises(ValueError):
vector *= 0
@log_capture()
def test_build_vocab_warning(self, loglines):
"""Test if logger warning is raised on non-ideal input to a doc2vec model"""
raw_sentences = ['human', 'machine']
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
model = doc2vec.Doc2Vec()
model.build_vocab(sentences)
warning = "Each 'words' should be a list of words (usually unicode strings)."
self.assertTrue(warning in str(loglines))
@log_capture()
def test_train_warning(self, loglines):
"""Test if warning is raised if alpha rises during subsequent calls to train()"""
raw_sentences = [['human'],
['graph', 'trees']]
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
model = doc2vec.Doc2Vec(alpha=0.025, min_alpha=0.025, min_count=1, workers=8, vector_size=5)
model.build_vocab(sentences)
for epoch in range(10):
model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
model.alpha -= 0.002
model.min_alpha = model.alpha
if epoch == 5:
model.alpha += 0.05
warning = "Effective 'alpha' higher than previous training cycles"
self.assertTrue(warning in str(loglines))
def test_load_on_class_error(self):
"""Test if exception is raised when loading doc2vec model on instance"""
self.assertRaises(AttributeError, load_on_instance)
def test_negative_ns_exp(self):
"""The model should accept a negative ns_exponent as a valid value."""
model = doc2vec.Doc2Vec(sentences, ns_exponent=-1, min_count=1, workers=1)
tmpf = get_tmpfile('d2v_negative_exp.tst')
model.save(tmpf)
loaded_model = doc2vec.Doc2Vec.load(tmpf)
loaded_model.train(sentences, total_examples=model.corpus_count, epochs=1)
assert loaded_model.ns_exponent == -1, loaded_model.ns_exponent
# endclass TestDoc2VecModel
if not hasattr(TestDoc2VecModel, 'assertLess'):
# workaround for python 2.6
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
setattr(TestDoc2VecModel, 'assertLess', assertLess)
# Following code is useful for reproducing paragraph-vectors paper sentiment experiments
| TestDoc2VecModel |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 180655,
"end": 184065
} | class ____:
def test_vector(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "freshestcrl.pem"),
x509.load_pem_x509_certificate,
)
fcrl = cert.extensions.get_extension_for_class(x509.FreshestCRL).value
assert fcrl == x509.FreshestCRL(
[
x509.DistributionPoint(
full_name=[
x509.UniformResourceIdentifier(
"http://myhost.com/myca.crl"
),
x509.UniformResourceIdentifier(
"http://backup.myhost.com/myca.crl"
),
],
relative_name=None,
reasons=frozenset(
[
x509.ReasonFlags.ca_compromise,
x509.ReasonFlags.key_compromise,
]
),
crl_issuer=[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COUNTRY_NAME, "US"
),
x509.NameAttribute(
NameOID.COMMON_NAME, "cryptography CA"
),
]
)
)
],
)
]
)
def test_public_bytes(self):
ext = x509.FreshestCRL(
[
x509.DistributionPoint(
full_name=[
x509.UniformResourceIdentifier(
"http://myhost.com/myca.crl"
),
x509.UniformResourceIdentifier(
"http://backup.myhost.com/myca.crl"
),
],
relative_name=None,
reasons=frozenset(
[
x509.ReasonFlags.ca_compromise,
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.aa_compromise,
]
),
crl_issuer=[
x509.DirectoryName(
x509.Name(
[
x509.NameAttribute(
NameOID.COUNTRY_NAME, "US"
),
x509.NameAttribute(
NameOID.COMMON_NAME, "cryptography CA"
),
]
)
)
],
)
]
)
assert (
ext.public_bytes()
== b"0w0u\xa0A\xa0?\x86\x1ahttp://myhost.com/myca.crl\x86!http://"
b"backup.myhost.com/myca.crl\x81\x03\x07`\x80\xa2+\xa4)0'1\x0b0\t"
b"\x06\x03U\x04\x06\x13\x02US1\x180\x16\x06\x03U\x04\x03\x0c\x0fc"
b"ryptography CA"
)
| TestFreshestCRLExtension |
python | great-expectations__great_expectations | docs/logging.py | {
"start": 0,
"end": 478
} | class ____:
"""Simple logger for printing to console during docs build"""
@staticmethod
def print_header(string: str) -> None:
LINE = "================================================================================"
Logger.print(LINE)
Logger.print(string)
Logger.print(LINE)
@staticmethod
def print(string: str) -> None:
ORANGE = "\033[38;5;208m"
END = "\033[1;37;0m"
print(ORANGE + string + END)
| Logger |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/map_test.py | {
"start": 64011,
"end": 66395
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.v2_only_combinations(),
combinations.combine(index=[-1, 4, 5])))
def testInvalidIndex(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([-1, 0, 1,
2]).map(lambda x: x * 2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.v2_only_combinations(),
combinations.combine(index=[-1, 0])))
def testEmptyDataset(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([]).map(lambda x: x // 2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(combinations.times(test_base.v2_only_combinations()))
def testBasic(self):
dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4,
5]).map(lambda x: x * 3)
for i in range(5):
self.assertEqual(self.evaluate(random_access.at(dataset, index=i)), i * 3)
@combinations.generate(
combinations.times(
test_base.v2_only_combinations(),
combinations.combine(
elements=[0, 10, 20, 40], num_parallel_calls=[None, 2])))
def testMultipleCombinations(self, elements, num_parallel_calls):
dataset = dataset_ops.Dataset.range(elements).map(
lambda x: x // 2, num_parallel_calls=num_parallel_calls)
for i in range(elements):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)), i // 2)
@combinations.generate(
combinations.times(
test_base.v2_only_combinations(),
combinations.combine(
elements=[0, 10, 20, 40], num_parallel_calls=[None, 2])))
def testMapFnInFunction(self, elements, num_parallel_calls):
@def_function.function
def _map_fn(x):
return math_ops.square(x)
dataset = dataset_ops.Dataset.range(elements).map(
_map_fn, num_parallel_calls=num_parallel_calls)
for i in range(elements):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)),
self.evaluate(math_ops.square(i)))
| MapRandomAccessTest |
python | doocs__leetcode | lcp/LCP 61. 气温变化趋势/Solution.py | {
"start": 0,
"end": 395
} | class ____:
def temperatureTrend(self, temperatureA: List[int], temperatureB: List[int]) -> int:
ans = f = 0
for (a1, b1), (a2, b2) in pairwise(zip(temperatureA, temperatureB)):
x, y = a2 - a1, b2 - b1
if x == y == 0 or x * y > 0:
f += 1
ans = max(ans, f)
else:
f = 0
return ans
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_lease_candidate_spec.py | {
"start": 383,
"end": 11356
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'binary_version': 'str',
'emulation_version': 'str',
'lease_name': 'str',
'ping_time': 'datetime',
'renew_time': 'datetime',
'strategy': 'str'
}
attribute_map = {
'binary_version': 'binaryVersion',
'emulation_version': 'emulationVersion',
'lease_name': 'leaseName',
'ping_time': 'pingTime',
'renew_time': 'renewTime',
'strategy': 'strategy'
}
def __init__(self, binary_version=None, emulation_version=None, lease_name=None, ping_time=None, renew_time=None, strategy=None, local_vars_configuration=None): # noqa: E501
"""V1beta1LeaseCandidateSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._binary_version = None
self._emulation_version = None
self._lease_name = None
self._ping_time = None
self._renew_time = None
self._strategy = None
self.discriminator = None
self.binary_version = binary_version
if emulation_version is not None:
self.emulation_version = emulation_version
self.lease_name = lease_name
if ping_time is not None:
self.ping_time = ping_time
if renew_time is not None:
self.renew_time = renew_time
self.strategy = strategy
@property
def binary_version(self):
"""Gets the binary_version of this V1beta1LeaseCandidateSpec. # noqa: E501
BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required. # noqa: E501
:return: The binary_version of this V1beta1LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._binary_version
@binary_version.setter
def binary_version(self, binary_version):
"""Sets the binary_version of this V1beta1LeaseCandidateSpec.
BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required. # noqa: E501
:param binary_version: The binary_version of this V1beta1LeaseCandidateSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and binary_version is None: # noqa: E501
raise ValueError("Invalid value for `binary_version`, must not be `None`") # noqa: E501
self._binary_version = binary_version
@property
def emulation_version(self):
"""Gets the emulation_version of this V1beta1LeaseCandidateSpec. # noqa: E501
EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\" # noqa: E501
:return: The emulation_version of this V1beta1LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._emulation_version
@emulation_version.setter
def emulation_version(self, emulation_version):
"""Sets the emulation_version of this V1beta1LeaseCandidateSpec.
EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\" # noqa: E501
:param emulation_version: The emulation_version of this V1beta1LeaseCandidateSpec. # noqa: E501
:type: str
"""
self._emulation_version = emulation_version
@property
def lease_name(self):
"""Gets the lease_name of this V1beta1LeaseCandidateSpec. # noqa: E501
LeaseName is the name of the lease for which this candidate is contending. The limits on this field are the same as on Lease.name. Multiple lease candidates may reference the same Lease.name. This field is immutable. # noqa: E501
:return: The lease_name of this V1beta1LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._lease_name
@lease_name.setter
def lease_name(self, lease_name):
"""Sets the lease_name of this V1beta1LeaseCandidateSpec.
LeaseName is the name of the lease for which this candidate is contending. The limits on this field are the same as on Lease.name. Multiple lease candidates may reference the same Lease.name. This field is immutable. # noqa: E501
:param lease_name: The lease_name of this V1beta1LeaseCandidateSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and lease_name is None: # noqa: E501
raise ValueError("Invalid value for `lease_name`, must not be `None`") # noqa: E501
self._lease_name = lease_name
@property
def ping_time(self):
"""Gets the ping_time of this V1beta1LeaseCandidateSpec. # noqa: E501
PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime. # noqa: E501
:return: The ping_time of this V1beta1LeaseCandidateSpec. # noqa: E501
:rtype: datetime
"""
return self._ping_time
@ping_time.setter
def ping_time(self, ping_time):
"""Sets the ping_time of this V1beta1LeaseCandidateSpec.
PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime. # noqa: E501
:param ping_time: The ping_time of this V1beta1LeaseCandidateSpec. # noqa: E501
:type: datetime
"""
self._ping_time = ping_time
@property
def renew_time(self):
"""Gets the renew_time of this V1beta1LeaseCandidateSpec. # noqa: E501
RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates. # noqa: E501
:return: The renew_time of this V1beta1LeaseCandidateSpec. # noqa: E501
:rtype: datetime
"""
return self._renew_time
@renew_time.setter
def renew_time(self, renew_time):
"""Sets the renew_time of this V1beta1LeaseCandidateSpec.
RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates. # noqa: E501
:param renew_time: The renew_time of this V1beta1LeaseCandidateSpec. # noqa: E501
:type: datetime
"""
self._renew_time = renew_time
@property
def strategy(self):
"""Gets the strategy of this V1beta1LeaseCandidateSpec. # noqa: E501
Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. # noqa: E501
:return: The strategy of this V1beta1LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._strategy
@strategy.setter
def strategy(self, strategy):
"""Sets the strategy of this V1beta1LeaseCandidateSpec.
Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. # noqa: E501
:param strategy: The strategy of this V1beta1LeaseCandidateSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and strategy is None: # noqa: E501
raise ValueError("Invalid value for `strategy`, must not be `None`") # noqa: E501
self._strategy = strategy
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1LeaseCandidateSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1LeaseCandidateSpec):
return True
return self.to_dict() != other.to_dict()
| V1beta1LeaseCandidateSpec |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_structured_output_retry.py | {
"start": 587,
"end": 2262
} | class ____(AgentMiddleware):
"""Retries model calls when structured output parsing fails."""
def __init__(self, max_retries: int) -> None:
"""Initialize the structured output retry middleware.
Args:
max_retries: Maximum number of retry attempts.
"""
self.max_retries = max_retries
def wrap_model_call(
self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Intercept and control model execution via handler callback.
Args:
request: The model request containing messages and configuration.
handler: The function to call the model.
Returns:
The model response.
Raises:
StructuredOutputError: If max retries exceeded without success.
"""
for attempt in range(self.max_retries + 1):
try:
return handler(request)
except StructuredOutputError as exc:
if attempt == self.max_retries:
raise
# Include both the AI message and error in a single human message
# to maintain valid chat history alternation
ai_content = exc.ai_message.content
error_message = (
f"Your previous response was:\n{ai_content}\n\n"
f"Error: {exc}. Please try again with a valid response."
)
request.messages.append(HumanMessage(content=error_message))
# This should never be reached, but satisfies type checker
return handler(request)
| StructuredOutputRetryMiddleware |
python | doocs__leetcode | solution/2900-2999/2944.Minimum Number of Coins for Fruits/Solution2.py | {
"start": 0,
"end": 220
} | class ____:
def minimumCoins(self, prices: List[int]) -> int:
n = len(prices)
for i in range((n - 1) // 2, 0, -1):
prices[i - 1] += min(prices[i : i * 2 + 1])
return prices[0]
| Solution |
python | pypa__warehouse | tests/unit/subscriptions/test_services.py | {
"start": 14763,
"end": 15703
} | class ____:
def test_basic_init(self):
api = pretend.stub()
billing_service = GenericBillingService(
api=api,
publishable_key="secret_to_everybody",
webhook_secret="keep_it_secret_keep_it_safe",
domain="tests",
)
assert billing_service.api is api
assert billing_service.publishable_key == "secret_to_everybody"
assert billing_service.webhook_secret == "keep_it_secret_keep_it_safe"
assert billing_service.domain == "tests"
def test_notimplementederror(self):
with pytest.raises(NotImplementedError):
GenericBillingService.create_service(pretend.stub(), pretend.stub())
def test_subscription_factory():
db = pretend.stub()
context = pretend.stub()
request = pretend.stub(db=db)
service = services.subscription_factory(context, request)
assert service.db is db
| TestGenericBillingService |
python | joke2k__faker | faker/providers/automotive/fil_PH/__init__.py | {
"start": 57,
"end": 238
} | class ____(EnPhAutomotiveProvider):
"""Implement automotive provider for ``fil_PH`` locale.
There is no difference from the ``en_PH`` implementation.
"""
pass
| Provider |
python | pypa__pip | src/pip/_vendor/pygments/util.py | {
"start": 9892,
"end": 10031
} | class ____(TextIOWrapper):
# Don't close underlying buffer on destruction.
def close(self):
self.flush()
| UnclosingTextIOWrapper |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/pandas_datasource.py | {
"start": 23092,
"end": 68714
} | class ____(_PandasDatasource):
"""Adds a single-batch pandas datasource to the data context.
Args:
name: The name of this datasource.
assets: An optional dictionary whose keys are Pandas DataAsset names and whose values
are Pandas DataAsset objects.
"""
# class directive to automatically generate read_* methods for assets
ADD_READER_METHODS: ClassVar[bool] = True
# class attributes
asset_types: ClassVar[Sequence[Type[DataAsset]]] = _DYNAMIC_ASSET_TYPES + [DataFrameAsset]
# instance attributes
type: Literal["pandas"] = "pandas"
assets: List[_PandasDataAsset] = []
@override
def dict(self, _exclude_default_asset_names: bool = True, **kwargs):
"""Overriding `.dict()` so that `DEFAULT_PANDAS_DATA_ASSET_NAME` is always excluded on serialization.""" # noqa: E501 # FIXME CoP
# Overriding `.dict()` instead of `.json()` because `.json()`is only called from the outermost model, # noqa: E501 # FIXME CoP
# .dict() is called for deeply nested models.
ds_dict = super().dict(**kwargs)
if _exclude_default_asset_names:
assets = ds_dict.pop("assets", None)
if assets:
assets = [a for a in assets if a["name"] != DEFAULT_PANDAS_DATA_ASSET_NAME]
if assets:
ds_dict["assets"] = assets
return ds_dict
@override
def test_connection(self, test_assets: bool = True) -> None: ...
@staticmethod
def _validate_asset_name(asset_name: Optional[str] = None) -> str:
if asset_name == DEFAULT_PANDAS_DATA_ASSET_NAME:
raise PandasDatasourceError( # noqa: TRY003 # FIXME CoP
f"""An asset_name of {DEFAULT_PANDAS_DATA_ASSET_NAME} cannot be passed because it is a reserved name.""" # noqa: E501 # FIXME CoP
)
if not asset_name:
asset_name = DEFAULT_PANDAS_DATA_ASSET_NAME
return asset_name
def _get_batch(self, asset: _PandasDataAsset, dataframe: pd.DataFrame | None = None) -> Batch:
batch_request: BatchRequest
if isinstance(asset, DataFrameAsset):
if not isinstance(dataframe, pd.DataFrame):
raise ValueError( # noqa: TRY003, TRY004 # FIXME CoP
'Cannot execute "PandasDatasource.read_dataframe()" without a valid "dataframe" argument.' # noqa: E501 # FIXME CoP
)
batch_request = asset.build_batch_request(options={"dataframe": dataframe})
else:
batch_request = asset.build_batch_request()
return asset.get_batch(batch_request)
@public_api
def add_dataframe_asset(
self,
name: str,
batch_metadata: Optional[BatchMetadata] = None,
) -> DataFrameAsset:
"""Adds a Dataframe DataAsset to this PandasDatasource object.
Args:
name: The name of the Dataframe asset. This can be any arbitrary string.
batch_metadata: An arbitrary user defined dictionary with string keys which will get inherited by any
batches created from the asset.
Returns:
The DataFrameAsset that has been added to this datasource.
""" # noqa: E501 # FIXME CoP
asset: DataFrameAsset = DataFrameAsset(
name=name,
batch_metadata=batch_metadata or {},
)
return self._add_asset(asset=asset)
@public_api
def read_dataframe(
self,
dataframe: pd.DataFrame,
asset_name: Optional[str] = None,
batch_metadata: Optional[BatchMetadata] = None,
) -> Batch:
"""Reads a Dataframe and returns a Batch containing the data.
Args:
dataframe: The Dataframe containing the data for this data asset.
asset_name: The name of the Dataframe asset, should you wish to use it again.
batch_metadata: An arbitrary user defined dictionary with string keys which will get inherited by any
batches created from the asset.
Returns:
A Batch using an ephemeral DataFrameAsset.
""" # noqa: E501 # FIXME CoP
name: str = self._validate_asset_name(asset_name=asset_name)
asset: DataFrameAsset = self.add_dataframe_asset(
name=name,
batch_metadata=batch_metadata or {},
)
return self._get_batch(asset=asset, dataframe=dataframe)
@public_api
def add_clipboard_asset(
self,
name: str,
**kwargs,
) -> ClipboardAsset: # type: ignore[valid-type] # FIXME CoP
"""
Add a clipboard data asset to the datasource.
Args:
name: The name of the clipboard asset. This can be any arbitrary string.
**kwargs: Additional keyword arguments to pass to pandas.read_clipboard().
Returns:
The ClipboardAsset that has been added to this datasource.
"""
asset = ClipboardAsset(
name=name,
**kwargs,
)
return self._add_asset(asset=asset)
@public_api
def read_clipboard(
self,
asset_name: Optional[str] = None,
**kwargs,
) -> Batch:
"""
Read a clipboard and return a Batch containing the data.
Args:
asset_name: The name of the clipboard asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_clipboard().
Returns:
A Batch using an ephemeral ClipboardAsset.
"""
name: str = self._validate_asset_name(asset_name=asset_name)
asset: ClipboardAsset = self.add_clipboard_asset( # type: ignore[valid-type] # FIXME CoP
name=name,
**kwargs,
)
return self._get_batch(asset=asset)
@public_api
def add_csv_asset(
self,
name: str,
filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
**kwargs,
) -> CSVAsset: # type: ignore[valid-type] # FIXME CoP
"""
Add a CSV data asset to the datasource.
Args:
name: The name of the CSV asset. This can be any arbitrary string.
filepath_or_buffer: The path to the CSV file or a URL pointing to the CSV file.
**kwargs: Additional keyword arguments to pass to pandas.read_csv().
Returns:
The CSVAsset that has been added to this datasource.
"""
asset = CSVAsset(
name=name,
filepath_or_buffer=filepath_or_buffer, # type: ignore[call-arg] # FIXME CoP
**kwargs,
)
return self._add_asset(asset=asset)
@public_api
def read_csv(
self,
filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
asset_name: Optional[str] = None,
**kwargs,
) -> Batch:
"""
Read a CSV file and return a Batch containing the data.
Args:
filepath_or_buffer: The path to the CSV file or a URL pointing to the CSV file.
asset_name: The name of the CSV asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_csv().
Returns:
A Batch using an ephemeral CSVAsset.
"""
name: str = self._validate_asset_name(asset_name=asset_name)
asset: CSVAsset = self.add_csv_asset( # type: ignore[valid-type] # FIXME CoP
name=name,
filepath_or_buffer=filepath_or_buffer,
**kwargs,
)
return self._get_batch(asset=asset)
@public_api
def add_excel_asset(
self,
name: str,
io: os.PathLike | str | bytes,
**kwargs,
) -> ExcelAsset: # type: ignore[valid-type] # FIXME CoP
"""
Add an Excel data asset to the datasource.
Args:
name: The name of the Excel asset. This can be any arbitrary string.
io: The path to the Excel file or a URL pointing to the Excel file.
**kwargs: Additional keyword arguments to pass to pandas.read_excel().
Returns:
The ExcelAsset that has been added to this datasource.
"""
asset = ExcelAsset( # type: ignore[call-arg] # FIXME CoP
name=name,
io=io,
**kwargs,
)
return self._add_asset(asset=asset)
@public_api
def read_excel(
self,
io: os.PathLike | str | bytes,
asset_name: Optional[str] = None,
**kwargs,
) -> Batch:
"""
Read an Excel file and return a Batch containing the data.
Args:
io: The path to the Excel file or a URL pointing to the Excel file.
asset_name: The name of the Excel asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_excel().
Returns:
A Batch using an ephemeral ExcelAsset.
"""
name: str = self._validate_asset_name(asset_name=asset_name)
asset: ExcelAsset = self.add_excel_asset( # type: ignore[valid-type] # FIXME CoP
name=name,
io=io,
**kwargs,
)
return self._get_batch(asset=asset)
@public_api
def add_feather_asset(
self,
name: str,
path: pydantic.FilePath | pydantic.AnyUrl,
**kwargs,
) -> FeatherAsset: # type: ignore[valid-type] # FIXME CoP
"""
Add a Feather data asset to the datasource.
Args:
name: The name of the Feather asset. This can be any arbitrary string.
path: The path to the Feather file or a URL pointing to the Feather file.
**kwargs: Additional keyword arguments to pass to pandas.read_feather().
Returns:
The FeatherAsset that has been added to this datasource.
"""
asset = FeatherAsset( # type: ignore[call-arg] # FIXME CoP
name=name,
path=path,
**kwargs,
)
return self._add_asset(asset=asset)
@public_api
def read_feather(
self,
path: pydantic.FilePath | pydantic.AnyUrl,
asset_name: Optional[str] = None,
**kwargs,
) -> Batch:
"""
Read a Feather file and return a Batch containing the data.
Args:
path: The path to the Feather file or a URL pointing to the Feather file.
asset_name: The name of the Feather asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_feather().
Returns:
A Batch using an ephemeral FeatherAsset.
"""
name: str = self._validate_asset_name(asset_name=asset_name)
asset: FeatherAsset = self.add_feather_asset( # type: ignore[valid-type] # FIXME CoP
name=name,
path=path,
**kwargs,
)
return self._get_batch(asset=asset)
@public_api
def add_fwf_asset(
self,
name: str,
filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
**kwargs,
) -> FeatherAsset: # type: ignore[valid-type] # FIXME CoP
"""
Adds a Fixed Width File DataAsset to the datasource.
Args:
filepath_or_buffer: The path to the file or a URL pointing to the Feather file.
asset_name: The name of the asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_fwf().
Returns:
The FWFAsset that has been added to this datasource.
"""
asset = FWFAsset( # type: ignore[call-arg] # FIXME CoP
name=name,
filepath_or_buffer=filepath_or_buffer,
**kwargs,
)
return self._add_asset(asset=asset)
@public_api
def read_fwf(
self,
filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
asset_name: Optional[str] = None,
**kwargs,
) -> Batch:
"""
Read a Fixed Width File and return a Batch containing the data.
Args:
filepath_or_buffer: The path to the file or a URL pointing to the Feather file.
asset_name: The name of the asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_fwf().
Returns:
A Batch using an ephemeral FWFAsset.
"""
name: str = self._validate_asset_name(asset_name=asset_name)
asset: FWFAsset = self.add_fwf_asset( # type: ignore[valid-type] # FIXME CoP
name=name,
filepath_or_buffer=filepath_or_buffer,
**kwargs,
)
return self._get_batch(asset=asset)
@public_api
def add_gbq_asset(
self,
name: str,
query: str,
**kwargs,
) -> GBQAsset: # type: ignore[valid-type] # FIXME CoP
"""
Add a GBQ data asset to the datasource.
Args:
name: The name of the GBQ asset. This can be any arbitrary string.
query: The SQL query to send to Google BigQuery.
**kwargs: Additional keyword arguments to pass to pandas.read_gbq().
Returns:
The GBQAsset that has been added to this datasource.
"""
asset = GBQAsset( # type: ignore[call-arg] # FIXME CoP
name=name,
query=query,
**kwargs,
)
return self._add_asset(asset=asset)
@public_api
def read_gbq(
self,
query: str,
asset_name: Optional[str] = None,
**kwargs,
) -> Batch:
"""
Read a Google BigQuery query and return a Batch containing the data.
Args:
query: The SQL query to send to Google BigQuery.
asset_name: The name of the GBQ asset, should you wish to use it again.
**kwargs: Additional keyword arguments to pass to pandas.read_gbq().
Returns:
A Batch using an ephemeral GBQAsset.
"""
name: str = self._validate_asset_name(asset_name=asset_name)
asset: GBQAsset = self.add_gbq_asset( # type: ignore[valid-type] # FIXME CoP
name=name,
query=query,
**kwargs,
)
return self._get_batch(asset=asset)
@public_api
def add_hdf_asset(
    self,
    name: str,
    path_or_buf: pd.HDFStore | os.PathLike | str,
    **kwargs,
) -> HDFAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register an HDF data asset on this datasource.

    Args:
        name: Arbitrary string identifying the HDF asset.
        path_or_buf: The path to the HDF file or a URL pointing to the HDF file.
        **kwargs: Additional keyword arguments to pass to pandas.read_hdf().

    Returns:
        The HDFAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=HDFAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            path_or_buf=path_or_buf,
            **kwargs,
        )
    )
@public_api
def read_hdf(
    self,
    path_or_buf: pd.HDFStore | os.PathLike | str,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read an HDF file and return a Batch containing the data.

    Args:
        path_or_buf: The path to the HDF file or a URL pointing to the HDF file.
        asset_name: Optional name for the ephemeral HDF asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_hdf().

    Returns:
        A Batch backed by an ephemeral HDFAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: HDFAsset = self.add_hdf_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        path_or_buf=path_or_buf,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_html_asset(
    self,
    name: str,
    io: os.PathLike | str,
    **kwargs,
) -> HTMLAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register an HTML data asset on this datasource.

    Args:
        name: Arbitrary string identifying the HTML asset.
        io: The path to the HTML file or a URL pointing to the HTML file.
        **kwargs: Additional keyword arguments to pass to pandas.read_html().

    Returns:
        The HTMLAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=HTMLAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            io=io,
            **kwargs,
        )
    )
@public_api
def read_html(
    self,
    io: os.PathLike | str,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read an HTML file and return a Batch containing the data.

    Args:
        io: The path to the HTML file or a URL pointing to the HTML file.
        asset_name: Optional name for the ephemeral HTML asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_html().

    Returns:
        A Batch backed by an ephemeral HTMLAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: HTMLAsset = self.add_html_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        io=io,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_json_asset(
    self,
    name: str,
    path_or_buf: pydantic.Json | pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> JSONAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a JSON data asset on this datasource.

    Args:
        name: Arbitrary string identifying the JSON asset.
        path_or_buf: The path to the JSON file or a URL pointing to the JSON file.
        **kwargs: Additional keyword arguments to pass to pandas.read_json().

    Returns:
        The JSONAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=JSONAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            path_or_buf=path_or_buf,
            **kwargs,
        )
    )
@public_api
def read_json(
    self,
    path_or_buf: pydantic.Json | pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a JSON file and return a Batch containing the data.

    Args:
        path_or_buf: The path to the JSON file or a URL pointing to the JSON file.
        asset_name: Optional name for the ephemeral JSON asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_json().

    Returns:
        A Batch backed by an ephemeral JSONAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: JSONAsset = self.add_json_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        path_or_buf=path_or_buf,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_orc_asset(
    self,
    name: str,
    path: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> ORCAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register an ORC file as a DataAsset on this PandasDatasource.

    Args:
        name: Arbitrary string identifying the ORC asset.
        path: The path to the ORC file.
        **kwargs: Additional keyword arguments to pass to the ORC reader.

    Returns:
        The ORCAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=ORCAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            path=path,
            **kwargs,
        )
    )
@public_api
def read_orc(
    self,
    path: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read an ORC file and return a Batch containing the data.

    Args:
        path: The path to the ORC file.
        asset_name: Optional name for the ephemeral ORC asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to the ORC reader.

    Returns:
        A Batch backed by an ephemeral ORCAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: ORCAsset = self.add_orc_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        path=path,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_parquet_asset(
    self,
    name: str,
    path: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> ParquetAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a parquet file as a DataAsset on this PandasDatasource.

    Args:
        name: Arbitrary string identifying the parquet asset.
        path: The path to the parquet file.
        **kwargs: Additional keyword arguments to pass to the parquet reader.

    Returns:
        The ParquetAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=ParquetAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            path=path,
            **kwargs,
        )
    )
@public_api
def read_parquet(
    self,
    path: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a parquet file and return a Batch containing the data.

    Args:
        path: The path to the parquet file.
        asset_name: Optional name for the ephemeral parquet asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to the parquet reader.

    Returns:
        A Batch backed by an ephemeral ParquetAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: ParquetAsset = self.add_parquet_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        path=path,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_pickle_asset(
    self,
    name: str,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> PickleAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a pickle file as a DataAsset on this PandasDatasource.

    Args:
        name: Arbitrary string identifying the pickle asset.
        filepath_or_buffer: The path to the pickle file.
        **kwargs: Additional keyword arguments to pass to the pickle reader.

    Returns:
        The PickleAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=PickleAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            filepath_or_buffer=filepath_or_buffer,
            **kwargs,
        )
    )
@public_api
def read_pickle(
    self,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a pickle file and return a Batch containing the data.

    Args:
        filepath_or_buffer: The path to the pickle file.
        asset_name: Optional name for the ephemeral pickle asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to the pickle reader.

    Returns:
        A Batch backed by an ephemeral PickleAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: PickleAsset = self.add_pickle_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        filepath_or_buffer=filepath_or_buffer,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_sas_asset(
    self,
    name: str,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> SASAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a SAS data asset on this datasource.

    Args:
        name: Arbitrary string identifying the SAS asset.
        filepath_or_buffer: The path to the SAS file or a URL pointing to the SAS file.
        **kwargs: Additional keyword arguments to pass to pandas.read_sas().

    Returns:
        The SASAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=SASAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            filepath_or_buffer=filepath_or_buffer,
            **kwargs,
        )
    )
@public_api
def read_sas(
    self,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a SAS file and return a Batch containing the data.

    Args:
        filepath_or_buffer: The path to the SAS file or a URL pointing to the SAS file.
        asset_name: Optional name for the ephemeral SAS asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_sas().

    Returns:
        A Batch backed by an ephemeral SASAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: SASAsset = self.add_sas_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        filepath_or_buffer=filepath_or_buffer,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_spss_asset(
    self,
    name: str,
    path: pydantic.FilePath,
    **kwargs,
) -> SPSSAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register an SPSS data asset on this datasource.

    Args:
        name: Arbitrary string identifying the SPSS asset.
        path: The path to the SPSS file.
        **kwargs: Additional keyword arguments to pass to pandas.read_spss().

    Returns:
        The SPSSAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=SPSSAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            path=path,
            **kwargs,
        )
    )
@public_api
def read_spss(
    self,
    path: pydantic.FilePath,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read an SPSS file and return a Batch containing the data.

    Args:
        path: The path to the SPSS file.
        asset_name: The name of the SPSS asset, should you wish to use it again.
        **kwargs: Additional keyword arguments to pass to pandas.read_spss().

    Returns:
        A Batch using an ephemeral SPSSAsset.
    """
    name: str = self._validate_asset_name(asset_name=asset_name)
    # Bug fix: this method previously delegated to add_parquet_asset(),
    # a copy-paste error that built a ParquetAsset and read the file with
    # the parquet reader instead of pandas.read_spss(). Delegate to
    # add_spss_asset() as the docstring and return annotation intend.
    asset: SPSSAsset = self.add_spss_asset(  # type: ignore[valid-type] # FIXME CoP
        name=name,
        path=path,
        **kwargs,
    )
    return self._get_batch(asset=asset)
@public_api
def add_sql_asset(
    self,
    name: str,
    sql: sa.select | sa.text | str,  # type: ignore[valid-type] # FIXME CoP
    con: sqlalchemy.Engine | sqlite3.Connection | str,
    **kwargs,
) -> SQLAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a SQL data asset on this datasource.

    Args:
        name: Arbitrary string identifying the SQL asset.
        sql: The SQL query to send to the database.
        con: The SQLAlchemy connection engine or a string URL to connect to the database.
        **kwargs: Additional keyword arguments to pass to pandas.read_sql().

    Returns:
        The SQLAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=SQLAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            sql=sql,
            con=con,
            **kwargs,
        )
    )
@public_api
def read_sql(
    self,
    sql: sa.select | sa.text | str,  # type: ignore[valid-type] # FIXME CoP
    con: sqlalchemy.Engine | sqlite3.Connection | str,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Run a SQL query and return a Batch containing the data.

    Args:
        sql: The SQL query to send to the database.
        con: The SQLAlchemy connection engine or a string URL to connect to the database.
        asset_name: Optional name for the ephemeral SQL asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_sql().

    Returns:
        A Batch backed by an ephemeral SQLAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: SQLAsset = self.add_sql_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        sql=sql,
        con=con,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_sql_query_asset(
    self,
    name: str,
    sql: sa.select | sa.text | str,  # type: ignore[valid-type] # FIXME CoP
    con: sqlalchemy.Engine | sqlite3.Connection | str,
    **kwargs,
) -> SQLQueryAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a SQL query data asset on this datasource.

    Args:
        name: Arbitrary string identifying the SQL query asset.
        sql: The SQL query to send to the database.
        con: The SQLAlchemy connection engine or a string URL to connect to the database.
        **kwargs: Additional keyword arguments to pass to pandas.read_sql_query().

    Returns:
        The SQLQueryAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=SQLQueryAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            sql=sql,
            con=con,
            **kwargs,
        )
    )
@public_api
def read_sql_query(
    self,
    sql: sa.select | sa.text | str,  # type: ignore[valid-type] # FIXME CoP
    con: sqlalchemy.Engine | sqlite3.Connection | str,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Run a SQL query and return a Batch containing the data.

    Args:
        sql: The SQL query to send to the database.
        con: The SQLAlchemy connection engine or a string URL to connect to the database.
        asset_name: Optional name for the ephemeral SQL query asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_sql_query().

    Returns:
        A Batch backed by an ephemeral SQLQueryAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: SQLQueryAsset = self.add_sql_query_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        sql=sql,
        con=con,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_sql_table_asset(
    self,
    name: str,
    table_name: str,
    con: sqlalchemy.Engine | str,
    **kwargs,
) -> SQLTableAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a SQL table data asset on this datasource.

    Args:
        name: Arbitrary string identifying the SQL table asset.
        table_name: The name of the SQL table to read.
        con: The SQLAlchemy connection engine or a string URL to connect to the database.
        **kwargs: Additional keyword arguments to pass to pandas.read_sql_table().

    Returns:
        The SQLTableAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=SQLTableAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            table_name=table_name,
            con=con,
            **kwargs,
        )
    )
@public_api
def read_sql_table(
    self,
    table_name: str,
    con: sqlalchemy.Engine | str,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a SQL table and return a Batch containing the data.

    Args:
        table_name: The name of the SQL table to read.
        con: The SQLAlchemy connection engine or a string URL to connect to the database.
        asset_name: Optional name for the ephemeral SQL table asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_sql_table().

    Returns:
        A Batch backed by an ephemeral SQLTableAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: SQLTableAsset = self.add_sql_table_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        table_name=table_name,
        con=con,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_stata_asset(
    self,
    name: str,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> StataAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a Stata data asset on this datasource.

    Args:
        name: Arbitrary string identifying the Stata asset.
        filepath_or_buffer: The path to the Stata file or a URL pointing to the Stata file.
        **kwargs: Additional keyword arguments to pass to pandas.read_stata().

    Returns:
        The StataAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=StataAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            filepath_or_buffer=filepath_or_buffer,
            **kwargs,
        )
    )
@public_api
def read_stata(
    self,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a Stata file and return a Batch containing the data.

    Args:
        filepath_or_buffer: The path to the Stata file or a URL pointing to the Stata file.
        asset_name: Optional name for the ephemeral Stata asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_stata().

    Returns:
        A Batch backed by an ephemeral StataAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: StataAsset = self.add_stata_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        filepath_or_buffer=filepath_or_buffer,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_table_asset(
    self,
    name: str,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> TableAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register a Table data asset on this datasource.

    Args:
        name: Arbitrary string identifying the Table asset.
        filepath_or_buffer: The path to the Table file or a URL pointing to the Table file.
        **kwargs: Additional keyword arguments to pass to pandas.read_table().

    Returns:
        The TableAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=TableAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            filepath_or_buffer=filepath_or_buffer,
            **kwargs,
        )
    )
@public_api
def read_table(
    self,
    filepath_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read a Table file and return a Batch containing the data.

    Args:
        filepath_or_buffer: The path to the Table file or a URL pointing to the Table file.
        asset_name: Optional name for the ephemeral Table asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_table().

    Returns:
        A Batch backed by an ephemeral TableAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: TableAsset = self.add_table_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        filepath_or_buffer=filepath_or_buffer,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
@public_api
def add_xml_asset(
    self,
    name: str,
    path_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    **kwargs,
) -> XMLAsset:  # type: ignore[valid-type] # FIXME CoP
    """
    Register an XML data asset on this datasource.

    Args:
        name: Arbitrary string identifying the XML asset.
        path_or_buffer: The path to the XML file or a URL pointing to the XML file.
        **kwargs: Additional keyword arguments to pass to pandas.read_xml().

    Returns:
        The XMLAsset that was added to this datasource.
    """
    return self._add_asset(
        asset=XMLAsset(  # type: ignore[call-arg] # FIXME CoP
            name=name,
            path_or_buffer=path_or_buffer,
            **kwargs,
        )
    )
@public_api
def read_xml(
    self,
    path_or_buffer: pydantic.FilePath | pydantic.AnyUrl,
    asset_name: Optional[str] = None,
    **kwargs,
) -> Batch:
    """
    Read an XML file and return a Batch containing the data.

    Args:
        path_or_buffer: The path to the XML file or a URL pointing to the XML file.
        asset_name: Optional name for the ephemeral XML asset, should you wish to reuse it.
        **kwargs: Additional keyword arguments to pass to pandas.read_xml().

    Returns:
        A Batch backed by an ephemeral XMLAsset.
    """
    validated_name: str = self._validate_asset_name(asset_name=asset_name)
    ephemeral_asset: XMLAsset = self.add_xml_asset(  # type: ignore[valid-type] # FIXME CoP
        name=validated_name,
        path_or_buffer=path_or_buffer,
        **kwargs,
    )
    return self._get_batch(asset=ephemeral_asset)
# Merge each add_*/read_* method's **kwargs into an explicit signature taken
# from the corresponding asset model, so IDEs and docs show the real reader
# parameters instead of **kwargs. Kept as flat per-method assignments (rather
# than a loop) so each line carries its own mypy ignore and no loop variables
# leak into the class namespace.
# attr-defined issue
# https://github.com/python/mypy/issues/12472
add_clipboard_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_clipboard_asset, ClipboardAsset, exclude={"type"}
)
read_clipboard.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_clipboard, ClipboardAsset, exclude={"type"}
)
add_csv_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_csv_asset, CSVAsset, exclude={"type"}
)
read_csv.__signature__ = _merge_signatures(read_csv, CSVAsset, exclude={"type"})  # type: ignore[attr-defined] # FIXME CoP
add_excel_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_excel_asset, ExcelAsset, exclude={"type"}
)
read_excel.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_excel, ExcelAsset, exclude={"type"}
)
add_feather_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_feather_asset, FeatherAsset, exclude={"type"}
)
read_feather.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_feather, FeatherAsset, exclude={"type"}
)
add_fwf_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_fwf_asset, FWFAsset, exclude={"type"}
)
read_fwf.__signature__ = _merge_signatures(read_fwf, FWFAsset, exclude={"type"})  # type: ignore[attr-defined] # FIXME CoP
add_gbq_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_gbq_asset, GBQAsset, exclude={"type"}
)
read_gbq.__signature__ = _merge_signatures(read_gbq, GBQAsset, exclude={"type"})  # type: ignore[attr-defined] # FIXME CoP
add_hdf_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_hdf_asset, HDFAsset, exclude={"type"}
)
read_hdf.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_hdf, HDFAsset, exclude={"type"}
)
add_html_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_html_asset, HTMLAsset, exclude={"type"}
)
read_html.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_html, HTMLAsset, exclude={"type"}
)
add_json_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_json_asset, JSONAsset, exclude={"type"}
)
read_json.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_json, JSONAsset, exclude={"type"}
)
add_orc_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_orc_asset, ORCAsset, exclude={"type"}
)
read_orc.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_orc, ORCAsset, exclude={"type"}
)
add_parquet_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_parquet_asset, ParquetAsset, exclude={"type"}
)
read_parquet.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_parquet, ParquetAsset, exclude={"type"}
)
add_pickle_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_pickle_asset, PickleAsset, exclude={"type"}
)
read_pickle.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_pickle, PickleAsset, exclude={"type"}
)
add_sas_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_sas_asset, SASAsset, exclude={"type"}
)
read_sas.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_sas, SASAsset, exclude={"type"}
)
add_spss_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_spss_asset, SPSSAsset, exclude={"type"}
)
read_spss.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_spss, SPSSAsset, exclude={"type"}
)
add_sql_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_sql_asset, SQLAsset, exclude={"type"}
)
read_sql.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_sql, SQLAsset, exclude={"type"}
)
add_sql_query_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_sql_query_asset, SQLQueryAsset, exclude={"type"}
)
read_sql_query.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_sql_query, SQLQueryAsset, exclude={"type"}
)
add_sql_table_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_sql_table_asset, SQLTableAsset, exclude={"type"}
)
read_sql_table.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_sql_table, SQLTableAsset, exclude={"type"}
)
add_stata_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_stata_asset, StataAsset, exclude={"type"}
)
read_stata.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_stata, StataAsset, exclude={"type"}
)
add_table_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_table_asset, TableAsset, exclude={"type"}
)
read_table.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_table, TableAsset, exclude={"type"}
)
add_xml_asset.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    add_xml_asset, XMLAsset, exclude={"type"}
)
read_xml.__signature__ = _merge_signatures(  # type: ignore[attr-defined] # FIXME CoP
    read_xml, XMLAsset, exclude={"type"}
)
| PandasDatasource |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_claim_template_spec.py | {
"start": 383,
"end": 4334
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metadata': 'V1ObjectMeta',
'spec': 'V1ResourceClaimSpec'
}
attribute_map = {
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceClaimTemplateSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metadata = None
self._spec = None
self.discriminator = None
if metadata is not None:
self.metadata = metadata
self.spec = spec
@property
def metadata(self):
"""Gets the metadata of this V1ResourceClaimTemplateSpec. # noqa: E501
:return: The metadata of this V1ResourceClaimTemplateSpec. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ResourceClaimTemplateSpec.
:param metadata: The metadata of this V1ResourceClaimTemplateSpec. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1ResourceClaimTemplateSpec. # noqa: E501
:return: The spec of this V1ResourceClaimTemplateSpec. # noqa: E501
:rtype: V1ResourceClaimSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1ResourceClaimTemplateSpec.
:param spec: The spec of this V1ResourceClaimTemplateSpec. # noqa: E501
:type: V1ResourceClaimSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceClaimTemplateSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceClaimTemplateSpec):
return True
return self.to_dict() != other.to_dict()
| V1ResourceClaimTemplateSpec |
python | mwaskom__seaborn | seaborn/_core/typing.py | {
"start": 1413,
"end": 1481
} | class ____:
def __repr__(self):
return "<default>"
| Default |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec26.py | {
"start": 223,
"end": 535
} | class ____(Generic[P]):
def __init__(self, func: Callable[P, Any]) -> None: ...
def func1(a: A[Concatenate[int, P]]) -> A[P]: ...
def func2(a: int, b: str) -> str: ...
val1 = A(func2)
reveal_type(val1, expected_text="A[(a: int, b: str)]")
val2 = func1(val1)
reveal_type(val2, expected_text="A[(b: str)]")
| A |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 24535,
"end": 25066
} | class ____(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
node: Expr
args: t.List[Expr]
kwargs: t.List[Keyword]
dyn_args: t.Optional[Expr]
dyn_kwargs: t.Optional[Expr]
| Call |
python | kamyu104__LeetCode-Solutions | Python/report-spam-message.py | {
"start": 46,
"end": 351
} | class ____(object):
def reportSpam(self, message, bannedWords):
"""
:type message: List[str]
:type bannedWords: List[str]
:rtype: bool
"""
THRESHOLD = 2
lookup = set(bannedWords)
return sum(m in lookup for m in message) >= THRESHOLD
| Solution |
python | py-pdf__pypdf | pypdf/generic/_files.py | {
"start": 758,
"end": 16238
} | class ____:
"""
Container holding the information on an embedded file.
Attributes are evaluated lazily if possible.
Further information on embedded files can be found in section 7.11 of the PDF 2.0 specification.
"""
def __init__(self, name: str, pdf_object: DictionaryObject, parent: ArrayObject | None = None) -> None:
"""
Args:
name: The (primary) name as provided in the name tree.
pdf_object: The corresponding PDF object to allow retrieving further data.
parent: The parent list.
"""
self._name = name
self.pdf_object = pdf_object
self._parent = parent
@property
def name(self) -> str:
"""The (primary) name of the embedded file as provided in the name tree."""
return self._name
@classmethod
def _create_new(cls, writer: PdfWriter, name: str, content: str | bytes) -> EmbeddedFile:
"""
Create a new embedded file and add it to the PdfWriter.
Args:
writer: The PdfWriter instance to add the embedded file to.
name: The filename to display.
content: The data in the file.
Returns:
EmbeddedFile instance for the newly created embedded file.
"""
# Convert string content to bytes if needed
if isinstance(content, str):
content = content.encode("latin-1")
# Create the file entry (the actual embedded file stream)
file_entry = DecodedStreamObject()
file_entry.set_data(content)
file_entry.update({NameObject(PG.TYPE): NameObject("/EmbeddedFile")})
# Create the /EF entry
ef_entry = DictionaryObject()
ef_entry.update({NameObject("/F"): writer._add_object(file_entry)})
# Create the filespec dictionary
from pypdf.generic import create_string_object # noqa: PLC0415
filespec = DictionaryObject()
filespec_reference = writer._add_object(filespec)
name_object = cast(TextStringObject, create_string_object(name))
filespec.update(
{
NameObject(PG.TYPE): NameObject("/Filespec"),
NameObject(FileSpecificationDictionaryEntries.F): name_object,
NameObject(FileSpecificationDictionaryEntries.EF): ef_entry,
}
)
# Add the name and filespec to the names array.
# We use the inverse order for insertion, as this allows us to re-use the
# same index.
names_array = cls._get_names_array(writer)
insertion_index = cls._get_insertion_index(names_array, name_object)
names_array.insert(insertion_index, filespec_reference)
names_array.insert(insertion_index, name_object)
# Return an EmbeddedFile instance
return cls(name=name, pdf_object=filespec, parent=names_array)
    @classmethod
    def _get_names_array(cls, writer: PdfWriter) -> ArrayObject:
        """
        Get the names array for embedded files, possibly creating and flattening it.

        Ensures that ``/Names -> /EmbeddedFiles -> /Names`` exists in the
        document catalog. An existing /Kids-based (nested) name tree is
        flattened into a single /Names array on the fly.

        Args:
            writer: The writer whose catalog is inspected and possibly modified.

        Returns:
            The flat names array of the form ``[name_1, ref_1, name_2, ref_2, ...]``.

        Raises:
            PdfReadError: If an existing embedded-files tree has neither
                /Names nor /Kids.
        """
        if CA.NAMES not in writer.root_object:
            # Add the /Names entry to the catalog.
            writer.root_object[NameObject(CA.NAMES)] = writer._add_object(DictionaryObject())
        names_dict = cast(DictionaryObject, writer.root_object[CA.NAMES])
        if "/EmbeddedFiles" not in names_dict:
            # We do not yet have an entry for embedded files. Create and return it.
            names = ArrayObject()
            embedded_files_names_dictionary = DictionaryObject(
                {NameObject(CA.NAMES): names}
            )
            names_dict[NameObject("/EmbeddedFiles")] = writer._add_object(embedded_files_names_dictionary)
            return names
        # We have an existing embedded files entry.
        embedded_files_names_tree = cast(DictionaryObject, names_dict["/EmbeddedFiles"])
        if "/Names" in embedded_files_names_tree:
            # Simple case: We already have a flat list.
            return cast(ArrayObject, embedded_files_names_tree[NameObject(CA.NAMES)])
        if "/Kids" not in embedded_files_names_tree:
            # Invalid case: This is no name tree.
            raise PdfReadError("Got neither Names nor Kids in embedded files tree.")
        # Complex case: Convert a /Kids-based name tree to a /Names-based one.
        # /Name-based ones are much easier to handle and allow us to simplify the
        # actual insertion logic by only having to consider one case.
        names = ArrayObject()
        kids = cast(ArrayObject, embedded_files_names_tree["/Kids"].get_object())
        embedded_files_names_dictionary = DictionaryObject(
            {NameObject(CA.NAMES): names}
        )
        # Replace the old tree root with the new flat dictionary before filling it.
        names_dict[NameObject("/EmbeddedFiles")] = writer._add_object(embedded_files_names_dictionary)
        for kid in kids:
            # Write the flattened file entries. As we do not change the actual files,
            # this should not have any impact on references to them.
            # There might be further (nested) kids here.
            # Wait for an example before evaluating an implementation.
            for name in kid.get_object().get("/Names", []):
                names.append(name)
        return names
@classmethod
def _get_insertion_index(cls, names_array: ArrayObject, name: str) -> int:
keys = [names_array[i].encode("utf-8") for i in range(0, len(names_array), 2)]
name_bytes = name.encode("utf-8")
start = bisect.bisect_left(keys, name_bytes)
end = bisect.bisect_right(keys, name_bytes)
if start != end:
return end * 2
if start == 0:
return 0
if start == (key_count := len(keys)):
return key_count * 2
return end * 2
@property
def alternative_name(self) -> str | None:
"""Retrieve the alternative name (file specification)."""
for key in [FileSpecificationDictionaryEntries.UF, FileSpecificationDictionaryEntries.F]:
# PDF 2.0 reference, table 43:
# > A PDF reader shall use the value of the UF key, when present, instead of the F key.
if key in self.pdf_object:
value = self.pdf_object[key].get_object()
if not is_null_or_none(value):
return cast(str, value)
return None
@alternative_name.setter
def alternative_name(self, value: TextStringObject | None) -> None:
"""Set the alternative name (file specification)."""
if value is None:
if FileSpecificationDictionaryEntries.UF in self.pdf_object:
self.pdf_object[NameObject(FileSpecificationDictionaryEntries.UF)] = NullObject()
if FileSpecificationDictionaryEntries.F in self.pdf_object:
self.pdf_object[NameObject(FileSpecificationDictionaryEntries.F)] = NullObject()
else:
self.pdf_object[NameObject(FileSpecificationDictionaryEntries.UF)] = value
self.pdf_object[NameObject(FileSpecificationDictionaryEntries.F)] = value
@property
def description(self) -> str | None:
"""Retrieve the description."""
value = self.pdf_object.get(FileSpecificationDictionaryEntries.DESC)
if is_null_or_none(value):
return None
return value
@description.setter
def description(self, value: TextStringObject | None) -> None:
"""Set the description."""
if value is None:
self.pdf_object[NameObject(FileSpecificationDictionaryEntries.DESC)] = NullObject()
else:
self.pdf_object[NameObject(FileSpecificationDictionaryEntries.DESC)] = value
    @property
    def associated_file_relationship(self) -> str:
        """Retrieve the relationship of the referring document to this embedded file."""
        # /Unspecified is the fallback used when the key is absent.
        return self.pdf_object.get("/AFRelationship", "/Unspecified")

    @associated_file_relationship.setter
    def associated_file_relationship(self, value: NameObject) -> None:
        """Set the relationship of the referring document to this embedded file."""
        # NOTE(review): the value is stored as-is and not validated here.
        self.pdf_object[NameObject("/AFRelationship")] = value
@property
def _embedded_file(self) -> StreamObject:
"""Retrieve the actual embedded file stream."""
if "/EF" not in self.pdf_object:
raise PdfReadError(f"/EF entry not found: {self.pdf_object}")
ef = cast(DictionaryObject, self.pdf_object["/EF"])
for key in [FileSpecificationDictionaryEntries.UF, FileSpecificationDictionaryEntries.F]:
if key in ef:
return cast(StreamObject, ef[key].get_object())
raise PdfReadError(f"No /(U)F key found in file dictionary: {ef}")
@property
def _params(self) -> DictionaryObject:
"""Retrieve the file-specific parameters."""
return self._embedded_file.get("/Params", DictionaryObject()).get_object()
@cached_property
def _ensure_params(self) -> DictionaryObject:
"""Ensure the /Params dictionary exists and return it."""
embedded_file = self._embedded_file
if "/Params" not in embedded_file:
embedded_file[NameObject("/Params")] = DictionaryObject()
return cast(DictionaryObject, embedded_file["/Params"])
@property
def subtype(self) -> str | None:
"""Retrieve the subtype. This is a MIME media type, prefixed by a slash."""
value = self._embedded_file.get("/Subtype")
if is_null_or_none(value):
return None
return value
@subtype.setter
def subtype(self, value: NameObject | None) -> None:
"""Set the subtype. This should be a MIME media type, prefixed by a slash."""
embedded_file = self._embedded_file
if value is None:
embedded_file[NameObject("/Subtype")] = NullObject()
else:
embedded_file[NameObject("/Subtype")] = value
@property
def content(self) -> bytes:
"""Retrieve the actual file content."""
return self._embedded_file.get_data()
@content.setter
def content(self, value: str | bytes) -> None:
"""Set the file content."""
if isinstance(value, str):
value = value.encode("latin-1")
self._embedded_file.set_data(value)
@property
def size(self) -> int | None:
"""Retrieve the size of the uncompressed file in bytes."""
value = self._params.get("/Size")
if is_null_or_none(value):
return None
return value
@size.setter
def size(self, value: NumberObject | None) -> None:
"""Set the size of the uncompressed file in bytes."""
params = self._ensure_params
if value is None:
params[NameObject("/Size")] = NullObject()
else:
params[NameObject("/Size")] = value
@property
def creation_date(self) -> datetime.datetime | None:
"""Retrieve the file creation datetime."""
return parse_iso8824_date(self._params.get("/CreationDate"))
@creation_date.setter
def creation_date(self, value: datetime.datetime | None) -> None:
"""Set the file creation datetime."""
params = self._ensure_params
if value is None:
params[NameObject("/CreationDate")] = NullObject()
else:
date_str = format_iso8824_date(value)
params[NameObject("/CreationDate")] = TextStringObject(date_str)
@property
def modification_date(self) -> datetime.datetime | None:
"""Retrieve the datetime of the last file modification."""
return parse_iso8824_date(self._params.get("/ModDate"))
@modification_date.setter
def modification_date(self, value: datetime.datetime | None) -> None:
"""Set the datetime of the last file modification."""
params = self._ensure_params
if value is None:
params[NameObject("/ModDate")] = NullObject()
else:
date_str = format_iso8824_date(value)
params[NameObject("/ModDate")] = TextStringObject(date_str)
@property
def checksum(self) -> bytes | None:
"""Retrieve the MD5 checksum of the (uncompressed) file."""
value = self._params.get("/CheckSum")
if is_null_or_none(value):
return None
return value
@checksum.setter
def checksum(self, value: ByteStringObject | None) -> None:
"""Set the MD5 checksum of the (uncompressed) file."""
params = self._ensure_params
if value is None:
params[NameObject("/CheckSum")] = NullObject()
else:
params[NameObject("/CheckSum")] = value
    def delete(self) -> None:
        """
        Delete the file from the document.

        Locates this file's filespec dictionary (or its indirect reference) in
        the parent names array and removes the (name, reference) pair, then
        invalidates this instance.

        Raises:
            PyPdfError: If no parent is known or the file cannot be found in it.
        """
        if not self._parent:
            raise PyPdfError("Parent required to delete file from document.")
        if self.pdf_object in self._parent:
            index = self._parent.index(self.pdf_object)
        elif (
            (indirect_reference := getattr(self.pdf_object, "indirect_reference", None)) is not None
            and indirect_reference in self._parent
        ):
            index = self._parent.index(indirect_reference)
        else:
            raise PyPdfError("File not found in parent object.")
        # Array layout is [..., name, reference, ...]: pop the reference first
        # so the name's position (index - 1) remains valid.
        self._parent.pop(index)  # Reference.
        self._parent.pop(index - 1)  # Name.
        self.pdf_object = DictionaryObject()  # Invalidate.
def __repr__(self) -> str:
return f"<{self.__class__.__name__} name={self.name!r}>"
    @classmethod
    def _load_from_names(cls, names: ArrayObject) -> Generator[EmbeddedFile]:
        """
        Convert the given name tree into class instances.

        Args:
            names: The name tree to load the data from.

        Returns:
            Iterable of class instances for the files found.
        """
        # This is a name tree of the format [name_1, reference_1, name_2, reference_2, ...]
        for i, name in enumerate(names):
            if not isinstance(name, str):
                # Non-string entries are the filespec references.
                # Skip plain strings and retrieve them as `direct_name` by index,
                # so that malformed (oddly laid out) arrays do not shift pairing.
                file_dictionary = name.get_object()
                # NOTE(review): assumes the preceding entry is the matching name —
                # holds for well-formed [name, reference, ...] arrays.
                direct_name = names[i - 1].get_object()
                yield EmbeddedFile(name=direct_name, pdf_object=file_dictionary, parent=names)
    @classmethod
    def _load(cls, catalog: DictionaryObject) -> Generator[EmbeddedFile]:
        """
        Load the embedded files for the given document catalog.

        This method and its signature are considered internal API and thus not exposed publicly for now.

        Args:
            catalog: The document catalog to load from.

        Returns:
            Iterable of class instances for the files found.
        """
        try:
            container = cast(
                DictionaryObject,
                cast(DictionaryObject, catalog["/Names"])["/EmbeddedFiles"],
            )
        except KeyError:
            # No /Names or /EmbeddedFiles entry: document has no embedded files.
            return
        if "/Kids" in container:
            for kid in cast(ArrayObject, container["/Kids"].get_object()):
                # There might be further (nested) kids here.
                # Wait for an example before evaluating an implementation.
                kid = kid.get_object()
                if "/Names" in kid:
                    yield from cls._load_from_names(cast(ArrayObject, kid["/Names"]))
        # A node may carry both /Kids and /Names; handle /Names last either way.
        if "/Names" in container:
            yield from cls._load_from_names(cast(ArrayObject, container["/Names"]))
| EmbeddedFile |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_flows.py | {
"start": 306,
"end": 419
} | class ____:
tainted_attribute: List[int] = []
tainted_class_attribute: List[int] = []
not_tainted = 2
| C |
python | charliermarsh__ruff | python/ruff-ecosystem/ruff_ecosystem/check.py | {
"start": 10157,
"end": 13489
} | class ____:
"""
The number of additions and removals by rule code.
While the attributes are frozen to avoid accidentally changing the value of an attribute,
the counters themselves are mutable and this class can be mutated with `+` and `update`.
"""
added_violations: Counter = field(default_factory=Counter)
removed_violations: Counter = field(default_factory=Counter)
added_fixes: Counter = field(default_factory=Counter)
removed_fixes: Counter = field(default_factory=Counter)
def rule_codes(self) -> set[str]:
return (
set(self.added_violations.keys())
.union(self.removed_violations.keys())
.union(self.added_fixes.keys())
.union(self.removed_fixes.keys())
)
def __add__(self, other: Self) -> Self:
if not isinstance(other, type(self)):
return NotImplemented
new = type(self)()
new.update(self)
new.update(other)
return new
def update(self, other: Self) -> Self:
self.added_violations.update(other.added_violations)
self.removed_violations.update(other.removed_violations)
self.added_fixes.update(other.added_fixes)
self.removed_fixes.update(other.removed_fixes)
return self
def total_added_violations(self) -> int:
return sum(self.added_violations.values())
def total_removed_violations(self) -> int:
return sum(self.removed_violations.values())
def total_added_fixes(self) -> int:
return sum(self.added_fixes.values())
def total_removed_fixes(self) -> int:
return sum(self.removed_fixes.values())
def total_changes_by_rule(self) -> Iterator[tuple[str, int]]:
"""
Yields the sum of changes for each rule
"""
totals = Counter()
totals.update(self.added_violations)
totals.update(self.removed_violations)
totals.update(self.added_fixes)
totals.update(self.removed_fixes)
yield from totals.items()
@classmethod
def from_diff(cls: type[Self], diff: CheckDiff) -> Self:
"""
Parse a diff from `ruff check` to determine the additions and removals for each rule
"""
rule_changes = cls()
for line in diff.parsed_lines:
if line.is_added:
if line in diff.fix_only_lines:
if line.fix_available:
rule_changes.added_fixes[line.rule_code] += 1
else:
rule_changes.removed_fixes[line.rule_code] += 1
else:
rule_changes.added_violations[line.rule_code] += 1
elif line.is_removed:
if line in diff.fix_only_lines:
if line.fix_available:
rule_changes.removed_fixes[line.rule_code] += 1
else:
rule_changes.added_fixes[line.rule_code] += 1
else:
rule_changes.removed_violations[line.rule_code] += 1
return rule_changes
def __bool__(self):
return bool(
self.added_violations
or self.removed_violations
or self.added_fixes
or self.removed_fixes
)
@dataclass(frozen=True)
| RuleChanges |
python | kamyu104__LeetCode-Solutions | Python/number-of-subarrays-having-even-product.py | {
"start": 381,
"end": 674
} | class ____(object):
def evenProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = cnt = 0
for i, x in enumerate(nums):
if x%2 == 0:
cnt = i+1
result += cnt
return result
| Solution2 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/resource_requirement.py | {
"start": 4424,
"end": 5034
} | class ____(ResourceKeyRequirement):
key: str # pyright: ignore[reportIncompatibleMethodOverride]
asset_key: Optional[str]
@property
def expected_type(self) -> type:
from dagster._core.storage.io_manager import IOManagerDefinition
return IOManagerDefinition
def describe_requirement(self) -> str:
external_asset_descriptor = (
f"external asset with key {self.asset_key}" if self.asset_key else "external asset"
)
return f"io manager with key '{self.key}' required by {external_asset_descriptor}"
@record
| ExternalAssetIOManagerRequirement |
python | python-pillow__Pillow | src/PIL/ImageFilter.py | {
"start": 3913,
"end": 4226
} | class ____(RankFilter):
"""
Create a max filter. Picks the largest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Max"
def __init__(self, size: int = 3) -> None:
self.size = size
self.rank = size * size - 1
| MaxFilter |
python | tensorflow__tensorflow | tensorflow/dtensor/python/layout.py | {
"start": 1834,
"end": 13333
} | class ____(_pywrap_dtensor_device.Mesh):
"""Represents a Mesh configuration over a certain list of Mesh Dimensions.
A mesh consists of named dimensions with sizes, which describe how a set of
devices are arranged. Defining tensor layouts in terms of mesh dimensions
allows us to efficiently determine the communication required when computing
an operation with tensors of different layouts.
A mesh provides information not only about the placement of the tensors but
also the topology of the underlying devices. For example, we can group 8 TPUs
as a 1-D array for data parallelism or a `2x4` grid for (2-way) data
parallelism and (4-way) model parallelism.
Refer to [DTensor Concepts](https://www.tensorflow.org/guide/dtensor_overview)
for in depth discussion and examples.
Note: the utilities `dtensor.create_mesh` and
`dtensor.create_distributed_mesh` provide a simpler API to create meshes for
single- or multi-client use cases.
"""
def __init__(
self,
dim_names: List[str],
global_device_ids: np.ndarray,
local_device_ids: List[int],
local_devices: List[Union[tf_device.DeviceSpec, str]],
mesh_name: str = '',
global_devices: Optional[List[Union[tf_device.DeviceSpec, str]]] = None,
use_xla_spmd: bool = USE_XLA_SPMD,
):
"""Builds a Mesh.
The `dim_names` and `global_device_ids` arguments describe the dimension
names and shape for the mesh.
For example,
```python
dim_names = ('x', 'y'),
global_device_ids = [[0, 1],
[2, 3],
[4, 5]]
```
defines a 2D mesh of shape 3x2. A reduction over the 'x' dimension will
reduce across columns (0, 2, 4) and (1, 3, 5), and a reduction over the 'y'
dimension reduces across rows.
Note: the utilities `dtensor.create_mesh` and
`dtensor.create_distributed_mesh` provide a simpler API to create meshes for
single- or multi-client use cases.
Args:
dim_names: A list of strings indicating dimension names.
global_device_ids: An ndarray of global device IDs is used to compose
DeviceSpecs describing the mesh. The shape of this array determines the
size of each mesh dimension. Values in this array should increment
sequentially from 0. This argument is the same for every DTensor client.
local_device_ids: A list of local device IDs equal to a subset of values
in global_device_ids. They indicate the position of local devices in the
global mesh. Different DTensor clients must contain distinct
local_device_ids contents. All local_device_ids from all DTensor clients
must cover every element in global_device_ids.
local_devices: The list of devices hosted locally. The elements correspond
1:1 to those of local_device_ids.
mesh_name: The name of the mesh. Currently, this is rarely used, and is
mostly used to indicate whether it is a CPU, GPU, or TPU-based mesh.
global_devices (optional): The list of global devices. Set when multiple
device meshes are in use.
use_xla_spmd (optional): Boolean when True, will use XLA SPMD instead of
DTensor SPMD.
"""
# Check if input args are valid.
if not isinstance(global_device_ids, np.ndarray):
raise ValueError('Variable global_device_ids must be an ndarray.')
if global_device_ids.size == 0:
raise ValueError('Variable global_device_ids must be non-empty.')
flat_global_device_ids = global_device_ids.flatten()
# global_device_ids are expected to be consecutive numbers.
# LINT.IfChange
distance = flat_global_device_ids[0]
if any(
(gid - i != distance) for i, gid in enumerate(flat_global_device_ids)):
raise ValueError('global_device_ids must sequentially increase: %s' %
global_device_ids)
# LINT.ThenChange(//tensorflow/dtensor/cc/dtensor_device.cc)
# TODO(b/242201545): This class is only for args type transformation for
# exported C++ Mesh class after the unification is complete. Any other
# logics should reside in the C++ layer, including validation checks, shall
# go to C++.
if len(dim_names) != global_device_ids.ndim:
raise ValueError(
'Number of mesh dimensions does not match number of dimension names.')
if not isinstance(local_device_ids, list):
raise ValueError('Variable local_device_ids must be a list of integers.')
if not isinstance(local_devices, list):
raise ValueError('Variable local_devices must be a list of DeviceSpecs.')
if global_devices and not isinstance(global_devices, list):
raise ValueError('Variable global_devices must be a list of DeviceSpecs.')
if not local_devices and not global_devices:
raise ValueError('Empty list of devices not allowed.')
# Transform args format for C++ Mesh constructor
global_device_ids_flatten = global_device_ids.flatten()
global_device_ids_shape = global_device_ids.shape
def to_str(d) -> str:
if isinstance(d, tf_device.DeviceSpec):
return d.to_string()
return d
def to_spec(d) -> tf_device.DeviceSpec:
if not isinstance(d, tf_device.DeviceSpec):
return tf_device.DeviceSpec.from_string(d)
return d
local_devices_str = [to_str(d) for d in local_devices]
local_devices_spec = [to_spec(d) for d in local_devices]
if not global_devices:
global_devices = []
global_devices_str = [to_str(d) for d in global_devices]
global_devices_spec = [to_spec(d) for d in global_devices]
local_devices_set = set(local_devices_spec)
local_device_only_contains_host_cpu = (
len(local_devices_set) == 1 and
list(local_devices_set)[0].device_type == 'CPU')
if not local_device_only_contains_host_cpu and len(local_devices) != len(
local_devices_set):
raise ValueError('Duplicate devices found in mesh specification %s.' %
[d for d in local_devices if local_devices.count(d) > 1])
if len(local_device_ids) != len(local_devices):
raise ValueError(
'Variable local_device_ids does not have same size as local_devices.')
if len(local_device_ids) > len(np.ravel(global_device_ids)):
raise ValueError('Cannot have more local than gobal device IDs.')
device_types = set([device.device_type for device in local_devices_spec])
if not device_types:
device_types = set([device.device_type for device in global_devices_spec])
if None in device_types:
raise ValueError('device_type is required')
if len(device_types) > 1:
raise ValueError('Devices containing multiple device_types : %s' %
device_types)
device_type = device_types.pop()
if use_xla_spmd and device_type != 'TPU':
raise ValueError('XLA SPMD is not currently not supported for %s mesh.' %
device_type)
super().__init__(
mesh_name,
dim_names,
global_device_ids_shape,
global_device_ids_flatten,
global_devices_str,
local_device_ids,
local_devices_str,
use_xla_spmd,
)
@classmethod
def _new_object(cls, *args, **kwargs):
# Need to explicitly invoke the base class __init__ because
# Mesh.__init__ overrode it with a different signature.
self = _pywrap_dtensor_device.Mesh.__new__(cls)
super().__init__(self, *args, **kwargs)
return self
def global_device_ids(self) -> np.ndarray:
"""Returns a global device list as an array."""
return np.array(super().global_device_ids(), dtype=np.int64).reshape(
self.shape()
)
def __getitem__(self, dim_name: str) -> MeshDimension:
return MeshDimension(name=dim_name, size=self.dim_size(dim_name))
def __hash__(self):
return hash(self.as_proto().SerializeToString(deterministic=True))
def __repr__(self) -> str:
return f'Mesh.from_string({self.to_string()})'
# TODO(panzf): change to pybind11 pickle implementation in the last step
def __reduce__(self):
return Mesh.from_string, (self.to_string(),)
# TODO(b/242201545): implement this in Mesh C++ class
def coords(self, device_idx: int) -> tensor.Tensor:
"""Converts the device index into a tensor of mesh coordinates."""
strides = ops.convert_to_tensor(self.strides)
shape = ops.convert_to_tensor(self.shape())
return (device_idx // strides) % shape
@classmethod
def from_proto(cls, proto: layout_pb2.MeshProto) -> 'Mesh':
"""Construct a mesh instance from input `proto`."""
return cls._new_object(mesh_proto=proto)
@classmethod
def from_string(cls, mesh_str: str) -> 'Mesh':
return cls._new_object(mesh_str=mesh_str)
@classmethod
def from_device(cls, device: str) -> 'Mesh':
"""Constructs a single device mesh from a device string."""
return cls._new_object(single_device=device)
@classmethod
def _from_mesh(cls, mesh: _pywrap_dtensor_device.Mesh):
"""Creates a copy from an existing pywrap mesh object."""
return cls._new_object(mesh=mesh)
@functools.cached_property
def _host_mesh(self) -> 'Mesh':
return Mesh._from_mesh(super().host_mesh())
def host_mesh(self) -> 'Mesh':
"""Returns a host mesh."""
# TODO(b/242201545): Find a way to get the super class to return correct
# typed objects.
return self._host_mesh
# TODO(b/242201545): implement this in Mesh C++ class
def local_device_locations(self) -> List[Dict[str, int]]:
"""Returns a list of local device locations.
A device location is a dictionary from dimension names to indices on those
dimensions.
"""
mapping = self.unravel_index()
return [mapping[device_id] for device_id in self.local_device_ids()]
# TODO(b/242201545): implement this in Mesh C++ class
@property
def strides(self) -> List[int]:
"""Returns the strides tensor array for this mesh.
If the mesh shape is `[a, b, c, d]`, then the strides array can be computed
as `[b*c*d, c*d, d, 1]`. This array can be useful in computing local device
offsets given a device ID. Using the same example, the device coordinates of
the mesh can be computed as:
```
[(device_id / (b*c*d)) % a,
(device_id / (c*d)) % b,
(device_id / (d)) % c,
(device_id) % d]
```
This is the same as `(device_id // mesh.strides) % mesh.shape`.
Returns:
The mesh strides as an integer tensor.
"""
return _compute_mesh_strides(self.shape())
# TODO(b/242201545): implement this in Mesh C++ class
def unravel_index(self):
"""Returns a dictionary from device ID to {dim_name: dim_index}.
For example, for a 3x2 mesh, return this:
```
{ 0: {'x': 0, 'y', 0},
1: {'x': 0, 'y', 1},
2: {'x': 1, 'y', 0},
3: {'x': 1, 'y', 1},
4: {'x': 2, 'y', 0},
5: {'x': 2, 'y', 1} }
```
"""
idx_ranges = [range(self.dim_size(dim_name)) for dim_name in self.dim_names]
mesh_pos = itertools.product(*idx_ranges)
mapping = {}
for device_id, device_pos in enumerate(mesh_pos):
device_loc = {}
for dim_name, dim_index in zip(self.dim_names, device_pos):
device_loc[dim_name] = dim_index
mapping[device_id] = device_loc
return mapping
LayoutType = _pywrap_dtensor_device.LayoutType
# TODO(hthu): Consider making this class immutable.
@tf_export('experimental.dtensor.Layout', v1=[])
| Mesh |
python | matplotlib__matplotlib | lib/mpl_toolkits/axes_grid1/axes_divider.py | {
"start": 11575,
"end": 18221
} | class ____(Divider):
"""
Divider based on the preexisting axes.
"""
def __init__(self, axes, xref=None, yref=None):
"""
Parameters
----------
axes : :class:`~matplotlib.axes.Axes`
xref
yref
"""
self._axes = axes
if xref is None:
self._xref = Size.AxesX(axes)
else:
self._xref = xref
if yref is None:
self._yref = Size.AxesY(axes)
else:
self._yref = yref
super().__init__(fig=axes.get_figure(), pos=None,
horizontal=[self._xref], vertical=[self._yref],
aspect=None, anchor="C")
def _get_new_axes(self, *, axes_class=None, **kwargs):
axes = self._axes
if axes_class is None:
axes_class = type(axes)
return axes_class(axes.get_figure(), axes.get_position(original=True),
**kwargs)
def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
"""
Helper method for ``append_axes("left")`` and ``append_axes("right")``.
See the documentation of `append_axes` for more details.
:meta private:
"""
if pad is None:
pad = mpl.rcParams["figure.subplot.wspace"] * self._xref
pos = "left" if pack_start else "right"
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad, fraction_ref=self._xref)
self.append_size(pos, pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size, fraction_ref=self._xref)
self.append_size(pos, size)
locator = self.new_locator(
nx=0 if pack_start else len(self._horizontal) - 1,
ny=self._yrefindex)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
"""
Helper method for ``append_axes("top")`` and ``append_axes("bottom")``.
See the documentation of `append_axes` for more details.
:meta private:
"""
if pad is None:
pad = mpl.rcParams["figure.subplot.hspace"] * self._yref
pos = "bottom" if pack_start else "top"
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad, fraction_ref=self._yref)
self.append_size(pos, pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size, fraction_ref=self._yref)
self.append_size(pos, size)
locator = self.new_locator(
nx=self._xrefindex,
ny=0 if pack_start else len(self._vertical) - 1)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def append_axes(self, position, size, pad=None, *, axes_class=None,
**kwargs):
"""
Add a new axes on a given side of the main axes.
Parameters
----------
position : {"left", "right", "bottom", "top"}
Where the new axes is positioned relative to the main axes.
size : :mod:`~mpl_toolkits.axes_grid1.axes_size` or float or str
The axes width or height. float or str arguments are interpreted
as ``axes_size.from_any(size, AxesX(<main_axes>))`` for left or
right axes, and likewise with ``AxesY`` for bottom or top axes.
pad : :mod:`~mpl_toolkits.axes_grid1.axes_size` or float or str
Padding between the axes. float or str arguments are interpreted
as for *size*. Defaults to :rc:`figure.subplot.wspace` times the
main Axes width (left or right axes) or :rc:`figure.subplot.hspace`
times the main Axes height (bottom or top axes).
axes_class : subclass type of `~.axes.Axes`, optional
The type of the new axes. Defaults to the type of the main axes.
**kwargs
All extra keywords arguments are passed to the created axes.
"""
create_axes, pack_start = _api.check_getitem({
"left": (self.new_horizontal, True),
"right": (self.new_horizontal, False),
"bottom": (self.new_vertical, True),
"top": (self.new_vertical, False),
}, position=position)
ax = create_axes(
size, pad, pack_start=pack_start, axes_class=axes_class, **kwargs)
self._fig.add_axes(ax)
return ax
def get_aspect(self):
if self._aspect is None:
aspect = self._axes.get_aspect()
if aspect == "auto":
return False
else:
return True
else:
return self._aspect
def get_position(self):
if self._pos is None:
bbox = self._axes.get_position(original=True)
return bbox.bounds
else:
return self._pos
def get_anchor(self):
if self._anchor is None:
return self._axes.get_anchor()
else:
return self._anchor
def get_subplotspec(self):
return self._axes.get_subplotspec()
# Helper for HBoxDivider/VBoxDivider.
# The variable names are written for a horizontal layout, but the calculations
# work identically for vertical layouts.
def _locate(x, y, w, h, summed_widths, equal_heights, fig_w, fig_h, anchor):
total_width = fig_w * w
max_height = fig_h * h
# Determine the k factors.
n = len(equal_heights)
eq_rels, eq_abss = equal_heights.T
sm_rels, sm_abss = summed_widths.T
A = np.diag([*eq_rels, 0])
A[:n, -1] = -1
A[-1, :-1] = sm_rels
B = [*(-eq_abss), total_width - sm_abss.sum()]
# A @ K = B: This finds factors {k_0, ..., k_{N-1}, H} so that
# eq_rel_i * k_i + eq_abs_i = H for all i: all axes have the same height
# sum(sm_rel_i * k_i + sm_abs_i) = total_width: fixed total width
# (foo_rel_i * k_i + foo_abs_i will end up being the size of foo.)
*karray, height = np.linalg.solve(A, B)
if height > max_height: # Additionally, upper-bound the height.
karray = (max_height - eq_abss) / eq_rels
# Compute the offsets corresponding to these factors.
ox = np.cumsum([0, *(sm_rels * karray + sm_abss)])
ww = (ox[-1] - ox[0]) / fig_w
h0_rel, h0_abs = equal_heights[0]
hh = (karray[0]*h0_rel + h0_abs) / fig_h
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
x0, y0 = pb1.anchored(anchor, pb).p0
return x0, y0, ox, hh
| AxesDivider |
python | walkccc__LeetCode | solutions/1872. Stone Game VIII/1872.py | {
"start": 0,
"end": 516
} | class ____:
def stoneGameVIII(self, stones: list[int]) -> int:
n = len(stones)
prefix = list(itertools.accumulate(stones))
# dp[i] := the maximum score difference the current player can get when the
# game starts at i, i.e. stones[0..i] are merged into the value prefix[i]
dp = [-math.inf] * n
# Must take all when there're only two stones left.
dp[n - 2] = prefix[-1]
for i in reversed(range(n - 2)):
dp[i] = max(dp[i + 1], prefix[i + 1] - dp[i + 1])
return dp[0]
| Solution |
python | ansible__ansible | lib/ansible/module_utils/facts/system/lsb.py | {
"start": 903,
"end": 3454
class ____(BaseFactCollector):
    """Collect Linux Standard Base (LSB) facts.

    Tries the ``lsb_release`` binary first and falls back to parsing
    ``/etc/lsb-release``. Results are published under the ``lsb`` key.
    """
    name = 'lsb'
    _fact_ids = set()  # type: t.Set[str]
    # Characters stripped from every collected value (quotes/backslashes).
    STRIP_QUOTES = r'\'\"\\'
    def _lsb_release_bin(self, lsb_path, module):
        """Parse ``lsb_release -a`` output into a dict of lsb facts.

        Returns an empty dict when the binary is missing or exits non-zero.
        """
        lsb_facts = {}
        if not lsb_path:
            return lsb_facts
        rc, out, err = module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
        if rc != 0:
            return lsb_facts
        for line in out.splitlines():
            # Skip blank lines and lines without a 'Key: value' separator.
            if len(line) < 1 or ':' not in line:
                continue
            value = line.split(':', 1)[1].strip()
            # NOTE(review): 'LSB Version:' is stored under 'release' and is
            # then overwritten by the later 'Release:' line when both are
            # present — looks intentional upstream, but worth confirming.
            if 'LSB Version:' in line:
                lsb_facts['release'] = value
            elif 'Distributor ID:' in line:
                lsb_facts['id'] = value
            elif 'Description:' in line:
                lsb_facts['description'] = value
            elif 'Release:' in line:
                lsb_facts['release'] = value
            elif 'Codename:' in line:
                lsb_facts['codename'] = value
        return lsb_facts
    def _lsb_release_file(self, etc_lsb_release_location):
        """Parse an ``/etc/lsb-release``-style KEY=value file into lsb facts.

        Returns an empty dict when the file does not exist.
        """
        lsb_facts = {}
        if not os.path.exists(etc_lsb_release_location):
            return lsb_facts
        for line in get_file_lines(etc_lsb_release_location):
            # NOTE(review): assumes every line contains '='; a line without
            # one would raise IndexError here — confirm input is always
            # well-formed KEY=value pairs.
            value = line.split('=', 1)[1].strip()
            if 'DISTRIB_ID' in line:
                lsb_facts['id'] = value
            elif 'DISTRIB_RELEASE' in line:
                lsb_facts['release'] = value
            elif 'DISTRIB_DESCRIPTION' in line:
                lsb_facts['description'] = value
            elif 'DISTRIB_CODENAME' in line:
                lsb_facts['codename'] = value
        return lsb_facts
    def collect(self, module=None, collected_facts=None):
        """Return ``{'lsb': {...}}`` facts; empty lsb dict without a module."""
        facts_dict = {}
        lsb_facts = {}
        if not module:
            return facts_dict
        lsb_path = module.get_bin_path('lsb_release')
        # try the 'lsb_release' script first
        if lsb_path:
            lsb_facts = self._lsb_release_bin(lsb_path,
                                              module=module)
        # no lsb_release, try looking in /etc/lsb-release
        if not lsb_facts:
            lsb_facts = self._lsb_release_file('/etc/lsb-release')
        if lsb_facts and 'release' in lsb_facts:
            # Major release is everything before the first dot, e.g. '22' of '22.04'.
            lsb_facts['major_release'] = lsb_facts['release'].split('.')[0]
        # Strip surrounding quotes/backslashes from every non-empty value.
        for k, v in lsb_facts.items():
            if v:
                lsb_facts[k] = v.strip(LSBFactCollector.STRIP_QUOTES)
        facts_dict['lsb'] = lsb_facts
        return facts_dict
| LSBFactCollector |
python | pytorch__pytorch | torch/cpu/__init__.py | {
"start": 2124,
"end": 2451
} | class ____:
"""
N.B. This class only exists to facilitate device-agnostic code
"""
def __init__(self, priority: int = -1) -> None:
pass
def wait_stream(self, stream) -> None:
pass
def record_event(self) -> None:
pass
def wait_event(self, event) -> None:
pass
| Stream |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_sp500_ticker.py | {
"start": 677,
"end": 1681
class ____(ColumnMapMetricProvider):
    """Column-map metric: each value must be a valid S&P 500 ticker.

    Only the pandas engine is implemented; the SQLAlchemy and Spark
    hooks below are left as commented-out stubs.
    """
    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_sp500_ticker"
    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Element-wise boolean check; is_valid_sp500_ticker is defined
        # elsewhere in this module.
        return column.apply(lambda x: is_valid_sp500_ticker(x))
    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError
    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
    # This class defines the Expectation itself
| ColumnValuesToBeValidSp500Ticker |
python | pallets__flask | src/flask/cli.py | {
"start": 29098,
"end": 36808
class ____(click.Path):
    """Path type whose value is several paths joined by ``os.pathsep``
    (``:`` on POSIX, ``;`` on Windows). Each entry is validated as a
    :class:`click.Path` value.
    """
    def convert(
        self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
    ) -> t.Any:
        converted = []
        # Validate each separated entry through the parent Path type.
        # An explicit loop is used because a no-arg super() inside a list
        # comprehension only works on Python 3.12+.
        for part in self.split_envvar_value(value):
            converted.append(super().convert(part, param, ctx))
        return converted
# CLI entry point for ``flask run`` — the full option surface is declared
# through this decorator stack; the function body only wires the options
# into Werkzeug's development server.
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
    "--cert",
    type=CertParamType(),
    help="Specify a certificate file to use HTTPS.",
    is_eager=True,
)
@click.option(
    "--key",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    callback=_validate_key,
    expose_value=False,
    help="The key file to use when specifying a certificate.",
)
@click.option(
    "--reload/--no-reload",
    default=None,
    help="Enable or disable the reloader. By default the reloader "
    "is active if debug is enabled.",
)
@click.option(
    "--debugger/--no-debugger",
    default=None,
    help="Enable or disable the debugger. By default the debugger "
    "is active if debug is enabled.",
)
@click.option(
    "--with-threads/--without-threads",
    default=True,
    help="Enable or disable multithreading.",
)
@click.option(
    "--extra-files",
    default=None,
    type=SeparatedPathType(),
    help=(
        "Extra files that trigger a reload on change. Multiple paths"
        f" are separated by {os.path.pathsep!r}."
    ),
)
@click.option(
    "--exclude-patterns",
    default=None,
    type=SeparatedPathType(),
    help=(
        "Files matching these fnmatch patterns will not trigger a reload"
        " on change. Multiple patterns are separated by"
        f" {os.path.pathsep!r}."
    ),
)
@pass_script_info
def run_command(
    info: ScriptInfo,
    host: str,
    port: int,
    reload: bool,
    debugger: bool,
    with_threads: bool,
    cert: ssl.SSLContext | tuple[str, str | None] | t.Literal["adhoc"] | None,
    extra_files: list[str] | None,
    exclude_patterns: list[str] | None,
) -> None:
    """Run a local development server.
    This server is for development purposes only. It does not provide
    the stability, security, or performance of production WSGI servers.
    The reloader and debugger are enabled by default with the '--debug'
    option.
    """
    try:
        app: WSGIApplication = info.load_app()  # pyright: ignore
    except Exception as e:
        if is_running_from_reloader():
            # When reloading, print out the error immediately, but raise
            # it later so the debugger or server can handle it.
            traceback.print_exc()
            err = e
            # Stand-in WSGI app that re-raises the load error per request,
            # letting the interactive debugger display it.
            def app(
                environ: WSGIEnvironment, start_response: StartResponse
            ) -> cabc.Iterable[bytes]:
                raise err from None
        else:
            # When not reloading, raise the error immediately so the
            # command fails.
            raise e from None
    debug = get_debug_flag()
    # Reloader/debugger default to the app's debug flag when not given
    # explicitly on the command line.
    if reload is None:
        reload = debug
    if debugger is None:
        debugger = debug
    show_server_banner(debug, info.app_import_path)
    run_simple(
        host,
        port,
        app,
        use_reloader=reload,
        use_debugger=debugger,
        threaded=with_threads,
        ssl_context=cert,
        extra_files=extra_files,
        exclude_patterns=exclude_patterns,
    )
# Prepend the shared --debug option so it appears first in --help output.
run_command.params.insert(0, _debug_option)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
    """Run an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    import code
    # Banner mirrors the stock interpreter's, plus app identity details.
    banner = (
        f"Python {sys.version} on {sys.platform}\n"
        f"App: {current_app.import_name}\n"
        f"Instance: {current_app.instance_path}"
    )
    ctx: dict[str, t.Any] = {}
    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get("PYTHONSTARTUP")
    if startup and os.path.isfile(startup):
        with open(startup) as f:
            # Execute the user's startup file into the shell namespace,
            # matching what the plain interpreter does at startup.
            eval(compile(f.read(), startup, "exec"), ctx)
    ctx.update(current_app.make_shell_context())
    # Site, customize, or startup script can set a hook to call when
    # entering interactive mode. The default one sets up readline with
    # tab and history completion.
    interactive_hook = getattr(sys, "__interactivehook__", None)
    if interactive_hook is not None:
        try:
            import readline
            from rlcompleter import Completer
        except ImportError:
            pass
        else:
            # rlcompleter uses __main__.__dict__ by default, which is
            # flask.__main__. Use the shell context instead.
            readline.set_completer(Completer(ctx).complete)
        interactive_hook()
    code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
    "--sort",
    "-s",
    type=click.Choice(("endpoint", "methods", "domain", "rule", "match")),
    default="endpoint",
    help=(
        "Method to sort routes by. 'match' is the order that Flask will match routes"
        " when dispatching a request."
    ),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
    """Show all registered routes with endpoints and methods."""
    rules = list(current_app.url_map.iter_rules())
    if not rules:
        click.echo("No routes were registered.")
        return
    # HEAD/OPTIONS are implicit on most routes; hide unless asked for.
    ignored_methods = set() if all_methods else {"HEAD", "OPTIONS"}
    host_matching = current_app.url_map.host_matching
    # Only show the Host/Subdomain column when at least one rule sets it.
    has_domain = any(rule.host if host_matching else rule.subdomain for rule in rules)
    rows = []
    for rule in rules:
        row = [
            rule.endpoint,
            ", ".join(sorted((rule.methods or set()) - ignored_methods)),
        ]
        if has_domain:
            row.append((rule.host if host_matching else rule.subdomain) or "")
        row.append(rule.rule)
        rows.append(row)
    headers = ["Endpoint", "Methods"]
    sorts = ["endpoint", "methods"]
    if has_domain:
        headers.append("Host" if host_matching else "Subdomain")
        sorts.append("domain")
    headers.append("Rule")
    sorts.append("rule")
    # 'match' is not in `sorts`; the ValueError keeps iter_rules order,
    # which is Flask's dispatch order.
    try:
        rows.sort(key=itemgetter(sorts.index(sort)))
    except ValueError:
        pass
    rows.insert(0, headers)
    # Pad each column to the widest cell, then add a separator row.
    widths = [max(len(row[i]) for row in rows) for i in range(len(headers))]
    rows.insert(1, ["-" * w for w in widths])
    template = "  ".join(f"{{{i}:<{w}}}" for i, w in enumerate(widths))
    for row in rows:
        click.echo(template.format(*row))
# Module-level Click group: the ``flask`` console-script entry point.
cli = FlaskGroup(
    name="flask",
    help="""\
A general utility script for Flask applications.
An application to load must be given with the '--app' option,
'FLASK_APP' environment variable, or with a 'wsgi.py' or 'app.py' file
in the current directory.
""",
)
def main() -> None:
    """Invoke the ``flask`` command-line interface."""
    cli.main()
if __name__ == "__main__":
    main()
| SeparatedPathType |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/databricks_sql_datasource.py | {
"start": 4342,
"end": 5830
class ____(SqlTableAsset):
    """Table asset whose ``table_name`` honors Databricks backtick quoting."""
    @pydantic.validator("table_name")
    @override
    def _resolve_quoted_name(cls, table_name: str) -> str | quoted_name:
        """Normalize a possibly backtick-quoted table name.

        Backtick-bracketed names have the backticks stripped and are
        re-wrapped in SQLAlchemy's ``quoted_name`` so quoting survives
        downstream; already-``quoted_name`` values pass through as-is.
        """
        table_name_is_quoted: bool = cls._is_bracketed_by_quotes(table_name)
        # Imported lazily via the compatibility shim; sqlalchemy may be absent.
        from great_expectations.compatibility import sqlalchemy
        if sqlalchemy.quoted_name:  # type: ignore[truthy-function] # FIXME CoP
            if isinstance(table_name, sqlalchemy.quoted_name):
                return table_name
            if table_name_is_quoted:
                # https://docs.sqlalchemy.org/en/20/core/sqlelement.html#sqlalchemy.sql.expression.quoted_name.quote
                # Remove the quotes and add them back using the sqlalchemy.quoted_name function
                # TODO: We need to handle nested quotes
                table_name = table_name.strip("`")
            return sqlalchemy.quoted_name(
                value=table_name,
                quote=table_name_is_quoted,
            )
        # Without sqlalchemy available, return the name untouched.
        return table_name
    @staticmethod
    @override
    def _is_bracketed_by_quotes(target: str) -> bool:
        """Returns True if the target string is bracketed by quotes.
        Arguments:
            target: A string to check if it is bracketed by quotes.
        Returns:
            True if the target string is bracketed by quotes.
        """
        # TODO: what todo with regular quotes? Error? Warn? "Fix"?
        return target.startswith("`") and target.endswith("`")
@public_api
| DatabricksTableAsset |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 28157,
"end": 30759
class ____(ASTBase):
    """AST node for a C function parameter list: ``(args) attrs``."""
    def __init__(
        self, args: list[ASTFunctionParameter], attrs: ASTAttributeList
    ) -> None:
        self.args = args
        self.attrs = attrs
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, ASTParameters):
            return NotImplemented
        return self.args == other.args and self.attrs == other.attrs
    def __hash__(self) -> int:
        return hash((self.args, self.attrs))
    @property
    def function_params(self) -> list[ASTFunctionParameter]:
        # Alias used by symbol-table code; identical to ``self.args``.
        return self.args
    def _stringify(self, transform: StringifyTransform) -> str:
        # Render as "(a, b, ...)" with the attribute list appended after a
        # space when non-empty.
        res = ['(']
        first = True
        for a in self.args:
            if not first:
                res.append(', ')
            first = False
            res.append(str(a))
        res.append(')')
        if len(self.attrs) != 0:
            res.extend((' ', transform(self.attrs)))
        return ''.join(res)
    def describe_signature(
        self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
    ) -> None:
        """Emit docutils nodes for this parameter list.

        In ``lastIsName`` mode the arguments go into a
        ``desc_parameterlist`` (optionally laid out one-per-line);
        otherwise plain punctuation and parameter nodes are appended.
        """
        verify_description_mode(mode)
        multi_line_parameter_list = False
        # Walk up to the enclosing desc_signature to read its
        # 'multi_line_parameter_list' option, if any ancestor sets it.
        test_node: Element = signode
        while test_node.parent:
            if not isinstance(test_node, addnodes.desc_signature):
                test_node = test_node.parent
                continue
            multi_line_parameter_list = test_node.get(
                'multi_line_parameter_list', False
            )
            break
        # only use the desc_parameterlist for the outer list, not for inner lists
        if mode == 'lastIsName':
            paramlist = addnodes.desc_parameterlist()
            paramlist['multi_line_parameter_list'] = multi_line_parameter_list
            for arg in self.args:
                param = addnodes.desc_parameter('', '', noemph=True)
                arg.describe_signature(param, 'param', env, symbol=symbol)
                paramlist += param
            signode += paramlist
        else:
            signode += addnodes.desc_sig_punctuation('(', '(')
            first = True
            for arg in self.args:
                if not first:
                    signode += addnodes.desc_sig_punctuation(',', ',')
                    signode += addnodes.desc_sig_space()
                first = False
                arg.describe_signature(signode, 'markType', env, symbol=symbol)
            signode += addnodes.desc_sig_punctuation(')', ')')
        if len(self.attrs) != 0:
            signode += addnodes.desc_sig_space()
            self.attrs.describe_signature(signode)
| ASTParameters |
python | huggingface__transformers | src/transformers/models/metaclip_2/modeling_metaclip_2.py | {
"start": 48831,
"end": 50957
class ____(MetaClip2PreTrainedModel):
    """MetaClip2 vision encoder with a linear image-classification head."""
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    def __init__(self, config: MetaClip2Config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        vision_model = MetaClip2VisionModel._from_config(config.vision_config)
        # Keep only the inner vision transformer; the wrapper model adds
        # nothing needed for classification.
        self.vision_model = vision_model.vision_model
        # Classifier head
        self.classifier = (
            nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @check_model_inputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values,
            **kwargs,
        )
        sequence_output = outputs.last_hidden_state
        # Mean-pool over all tokens except the first (presumably the CLS
        # token — confirm against the vision model's token layout).
        sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config)
        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
        )
__all__ = [
"MetaClip2Model",
"MetaClip2PreTrainedModel",
"MetaClip2TextModel",
"MetaClip2TextModelWithProjection",
"MetaClip2VisionModel",
"MetaClip2VisionModelWithProjection",
"MetaClip2ForImageClassification",
]
| MetaClip2ForImageClassification |
python | mlflow__mlflow | mlflow/tracking/_model_registry/client.py | {
"start": 1389,
"end": 32533
} | class ____:
"""
Client of an MLflow Model Registry Server that creates and manages registered
models and model versions.
"""
def __init__(self, registry_uri, tracking_uri):
"""
Args:
registry_uri: Address of local or remote model registry server.
tracking_uri: Address of local or remote tracking server.
"""
self.registry_uri = registry_uri
self.tracking_uri = tracking_uri
# NB: Fetch the tracking store (`self.store`) upon client initialization to ensure that
# the tracking URI is valid and the store can be properly resolved. We define `store` as a
# property method to ensure that the client is serializable, even if the store is not
self.store
@property
def store(self):
return utils._get_store(self.registry_uri, self.tracking_uri)
# Registered Model Methods
@record_usage_event(CreateRegisteredModelEvent)
def create_registered_model(self, name, tags=None, description=None, deployment_job_id=None):
"""Create a new registered model in backend store.
Args:
name: Name of the new model. This is expected to be unique in the backend store.
tags: A dictionary of key-value pairs that are converted into
:py:class:`mlflow.entities.model_registry.RegisteredModelTag` objects.
description: Description of the model.
deployment_job_id: Optional deployment job ID.
Returns:
A single object of :py:class:`mlflow.entities.model_registry.RegisteredModel`
created by backend.
"""
# TODO: Do we want to validate the name is legit here - non-empty without "/" and ":" ?
# Those are constraints applicable to any backend, given the model URI format.
tags = tags or {}
tags = [RegisteredModelTag(key, str(value)) for key, value in tags.items()]
return self.store.create_registered_model(name, tags, description, deployment_job_id)
def update_registered_model(self, name, description, deployment_job_id=None):
"""Updates description for RegisteredModel entity.
Backend raises exception if a registered model with given name does not exist.
Args:
name: Name of the registered model to update.
description: New description.
deployment_job_id: Optional deployment job ID.
Returns:
A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
return self.store.update_registered_model(
name=name, description=description, deployment_job_id=deployment_job_id
)
def rename_registered_model(self, name, new_name):
"""Update registered model name.
Args:
name: Name of the registered model to update.
new_name: New proposed name for the registered model.
Returns:
A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
if new_name.strip() == "":
raise MlflowException("The name must not be an empty string.")
return self.store.rename_registered_model(name=name, new_name=new_name)
def delete_registered_model(self, name):
"""Delete registered model.
Backend raises exception if a registered model with given name does not exist.
Args:
name: Name of the registered model to delete.
"""
self.store.delete_registered_model(name)
def search_registered_models(
self,
filter_string=None,
max_results=SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,
order_by=None,
page_token=None,
):
"""Search for registered models in backend that satisfy the filter criteria.
Args:
filter_string: Filter query string, defaults to searching all registered models.
max_results: Maximum number of registered models desired.
order_by: List of column names with ASC|DESC annotation, to be used for ordering
matching search results.
page_token: Token specifying the next page of results. It should be obtained from
a ``search_registered_models`` call.
Returns:
A PagedList of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects
that satisfy the search expressions. The pagination token for the next page can be
obtained via the ``token`` attribute of the object.
"""
# Add prompt filter for prompt-supported registries that also support filter_string
# Unity Catalog supports prompts but not filter_string parameter
if is_prompt_supported_registry(self.registry_uri) and not (
self.registry_uri or ""
).startswith("databricks-uc"):
# Adjust filter string to include or exclude prompts
filter_string = add_prompt_filter_string(filter_string, False)
return self.store.search_registered_models(filter_string, max_results, order_by, page_token)
def get_registered_model(self, name):
"""
Args:
name: Name of the registered model to get.
Returns:
A single :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
return self.store.get_registered_model(name)
def get_latest_versions(self, name, stages=None):
"""Latest version models for each requests stage. If no ``stages`` provided, returns the
latest version for each stage.
Args:
name: Name of the registered model from which to get the latest versions.
stages: List of desired stages. If input list is None, return latest versions for
'Staging' and 'Production' stages.
Returns:
List of :py:class:`mlflow.entities.model_registry.ModelVersion` objects.
"""
return self.store.get_latest_versions(name, stages)
def set_registered_model_tag(self, name, key, value):
"""Set a tag for the registered model.
Args:
name: Registered model name.
key: Tag key to log.
value: Tag value log.
Returns:
None
"""
self.store.set_registered_model_tag(name, RegisteredModelTag(key, str(value)))
def delete_registered_model_tag(self, name, key):
"""Delete a tag associated with the registered model.
Args:
name: Registered model name.
key: Registered model tag key.
Returns:
None
"""
self.store.delete_registered_model_tag(name, key)
# Model Version Methods
@record_usage_event(CreateModelVersionEvent)
def create_model_version(
self,
name,
source,
run_id=None,
tags=None,
run_link=None,
description=None,
await_creation_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
local_model_path=None,
model_id: str | None = None,
):
"""Create a new model version from given source.
Args:
name: Name of the containing registered model.
source: URI indicating the location of the model artifacts.
run_id: Run ID from MLflow tracking server that generated the model.
tags: A dictionary of key-value pairs that are converted into
:py:class:`mlflow.entities.model_registry.ModelVersionTag` objects.
run_link: Link to the run from an MLflow tracking server that generated this model.
description: Description of the version.
await_creation_for: Number of seconds to wait for the model version to finish being
created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
local_model_path: Local path to the MLflow model, if it's already accessible on the
local filesystem. Can be used by AbstractStores that upload model version files
to the model registry to avoid a redundant download from the source location when
logging and registering a model via a single
mlflow.<flavor>.log_model(..., registered_model_name) call.
model_id: The ID of the model (from an Experiment) that is being promoted to a
registered model version, if applicable.
Returns:
Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by
backend.
"""
tags = tags or {}
tags = [ModelVersionTag(key, str(value)) for key, value in tags.items()]
arg_names = _get_arg_names(self.store.create_model_version)
if "local_model_path" in arg_names:
mv = self.store.create_model_version(
name,
source,
run_id,
tags,
run_link,
description,
local_model_path=local_model_path,
model_id=model_id,
)
else:
# Fall back to calling create_model_version without
# local_model_path since old model registry store implementations may not
# support the local_model_path argument.
mv = self.store.create_model_version(
name, source, run_id, tags, run_link, description, model_id=model_id
)
if await_creation_for and await_creation_for > 0:
self.store._await_model_version_creation(mv, await_creation_for)
return mv
def copy_model_version(self, src_mv, dst_name):
"""Copy a model version from one registered model to another as a new model version.
Args:
src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing
the source model version.
dst_name: The name of the registered model to copy the model version to. If a
registered model with this name does not exist, it will be created.
Returns:
Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing
the cloned model version.
"""
return self.store.copy_model_version(src_mv=src_mv, dst_name=dst_name)
def update_model_version(self, name, version, description):
"""Update metadata associated with a model version in backend.
Args:
name: Name of the containing registered model.
version: Version number of the model version.
description: New description.
"""
return self.store.update_model_version(name=name, version=version, description=description)
def transition_model_version_stage(self, name, version, stage, archive_existing_versions=False):
"""Update model version stage.
Args:
name: Registered model name.
version: Registered model version.
stage: New desired stage for this model version.
archive_existing_versions: If this flag is set to ``True``, all existing model
versions in the stage will be automatically moved to the "archived" stage. Only
valid when ``stage`` is ``"staging"`` or ``"production"`` otherwise an error will be
raised.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
if stage.strip() == "":
raise MlflowException("The stage must not be an empty string.")
return self.store.transition_model_version_stage(
name=name,
version=version,
stage=stage,
archive_existing_versions=archive_existing_versions,
)
def get_model_version(self, name, version):
"""
Args:
name: Name of the containing registered model.
version: Version number of the model version.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
return self.store.get_model_version(name, version)
def delete_model_version(self, name, version):
"""Delete model version in backend.
Args:
name: Name of the containing registered model.
version: Version number of the model version.
"""
self.store.delete_model_version(name, version)
def get_model_version_download_uri(self, name, version):
"""Get the download location in Model Registry for this model version.
Args:
name: Name of the containing registered model.
version: Version number of the model version.
Returns:
A single URI location that allows reads for downloading.
"""
return self.store.get_model_version_download_uri(name, version)
def search_model_versions(
self,
filter_string=None,
max_results=SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,
order_by=None,
page_token=None,
):
"""Search for model versions in backend that satisfy the filter criteria.
.. warning:
The model version search results may not have aliases populated for performance reasons.
Args:
filter_string: A filter string expression. Currently supports a single filter
condition either name of model like ``name = 'model_name'`` or
``run_id = '...'``.
max_results: Maximum number of model versions desired.
order_by: List of column names with ASC|DESC annotation, to be used for ordering
matching search results.
page_token: Token specifying the next page of results. It should be obtained from
a ``search_model_versions`` call.
Returns:
A PagedList of :py:class:`mlflow.entities.model_registry.ModelVersion`
objects that satisfy the search expressions. The pagination token for the next
page can be obtained via the ``token`` attribute of the object.
"""
return self.store.search_model_versions(filter_string, max_results, order_by, page_token)
def get_model_version_stages(self, name, version):
"""
Returns:
A list of valid stages.
"""
return self.store.get_model_version_stages(name, version)
def set_model_version_tag(self, name, version, key, value):
"""Set a tag for the model version.
Args:
name: Registered model name.
version: Registered model version.
key: Tag key to log.
value: Tag value to log.
Returns:
None
"""
self.store.set_model_version_tag(name, version, ModelVersionTag(key, str(value)))
def delete_model_version_tag(self, name, version, key):
"""Delete a tag associated with the model version.
Args:
name: Registered model name.
version: Registered model version.
key: Tag key.
Returns:
None
"""
self.store.delete_model_version_tag(name, version, key)
def set_registered_model_alias(self, name, alias, version):
"""Set a registered model alias pointing to a model version.
Args:
name: Registered model name.
alias: Name of the alias.
version: Registered model version number.
Returns:
None
"""
self.store.set_registered_model_alias(name, alias, version)
def delete_registered_model_alias(self, name, alias):
"""Delete an alias associated with a registered model.
Args:
name: Registered model name.
alias: Name of the alias.
Returns:
None
"""
self.store.delete_registered_model_alias(name, alias)
def get_model_version_by_alias(self, name, alias):
"""Get the model version instance by name and alias.
Args:
name: Registered model name.
alias: Name of the alias.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
return self.store.get_model_version_by_alias(name, alias)
@record_usage_event(CreatePromptEvent)
def create_prompt(
self,
name: str,
description: str | None = None,
tags: dict[str, str] | None = None,
) -> Prompt:
"""
Create a new prompt in the registry.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
description: Optional description of the prompt.
tags: Optional dictionary of prompt tags.
Returns:
A PromptInfo object for Unity Catalog stores.
"""
return self.store.create_prompt(name, description, tags)
def get_prompt(self, name: str) -> Prompt | None:
"""
Get prompt metadata by name.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Registered prompt name.
Returns:
A Prompt object with prompt metadata, or None if not found.
"""
return self.store.get_prompt(name)
def search_prompts(
self,
filter_string: str | None = None,
max_results: int | None = None,
order_by: list[str] | None = None,
page_token: str | None = None,
) -> PagedList[Prompt]:
"""
Search for prompts in the registry.
This method delegates directly to the store, providing Unity Catalog support
when used with Unity Catalog registries.
Args:
filter_string: Filter query string. For Unity Catalog registries, must include
catalog and schema: "catalog = 'catalog_name' AND schema = 'schema_name'".
For traditional registries, standard filter expressions are supported.
max_results: Maximum number of prompts to return.
order_by: List of column names with ASC|DESC annotation.
page_token: Token specifying the next page of results.
Returns:
A PagedList of Prompt objects.
"""
return self.store.search_prompts(
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
def delete_prompt(self, name: str) -> None:
"""
Delete a prompt from the registry.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt to delete.
Returns:
None
"""
self.store.delete_prompt(name)
def create_prompt_version(
self,
name: str,
template: str | list[dict[str, Any]],
description: str | None = None,
tags: dict[str, str] | None = None,
response_format: type[BaseModel] | dict[str, Any] | None = None,
) -> PromptVersion:
"""
Create a new version of an existing prompt.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
template: The prompt template content for this version. Can be either:
- A string containing text with variables enclosed in double curly braces,
e.g. {{variable}}, which will be replaced with actual values by the `format`
method.
- A list of dictionaries representing chat messages, where each message has
'role' and 'content' keys (e.g., [{"role": "user", "content": "Hello {{name}}"}])
description: Optional description of this version.
tags: Optional dictionary of version tags.
response_format: Optional Pydantic class or dictionary defining the expected response
structure. This can be used to specify the schema for structured outputs from LLM
calls.
Returns:
A PromptVersion object representing the new version.
"""
return self.store.create_prompt_version(name, template, description, tags, response_format)
def get_prompt_version(self, name: str, version: str) -> PromptVersion:
"""
Get a specific version of a prompt.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
version: Version number of the prompt.
Returns:
A PromptVersion object.
"""
return self.store.get_prompt_version(name, version)
def delete_prompt_version(self, name: str, version: str) -> None:
"""
Delete a specific version of a prompt.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
version: Version number to delete.
Returns:
None
"""
self.store.delete_prompt_version(name, version)
def set_prompt_tag(self, name: str, key: str, value: str) -> None:
"""
Set a tag on a prompt.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
key: Tag key.
value: Tag value.
Returns:
None
"""
self.store.set_prompt_tag(name, key, value)
def delete_prompt_tag(self, name: str, key: str) -> None:
"""
Delete a tag from a prompt.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
key: Tag key to delete.
Returns:
None
"""
self.store.delete_prompt_tag(name, key)
def get_prompt_version_by_alias(self, name: str, alias: str) -> PromptVersion:
"""
Get a prompt version by alias.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
alias: Alias to look up.
Returns:
A PromptVersion object.
"""
return self.store.get_prompt_version_by_alias(name, alias)
def set_prompt_alias(self, name: str, alias: str, version: str) -> None:
"""
Set an alias for a prompt version.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
alias: Alias to set.
version: Version to alias.
Returns:
None
"""
self.store.set_prompt_alias(name, alias, version)
def delete_prompt_alias(self, name: str, alias: str) -> None:
"""
Delete a prompt alias.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
alias: Alias to delete.
Returns:
None
"""
self.store.delete_prompt_alias(name, alias)
def search_prompt_versions(
self, name: str, max_results: int | None = None, page_token: str | None = None
):
"""
Search prompt versions for a given prompt name.
This method delegates directly to the store. Only supported in Unity Catalog registries.
Args:
name: Name of the prompt to search versions for.
max_results: Maximum number of versions to return.
page_token: Token for pagination.
Returns:
SearchPromptVersionsResponse containing the list of versions.
Raises:
MlflowException: If used with non-Unity Catalog registries.
"""
return self.store.search_prompt_versions(name, max_results, page_token)
def link_prompt_version_to_model(self, name: str, version: int | str, model_id: str) -> None:
"""
Link a prompt version to a model.
Args:
name: The name of the prompt.
version: The version of the prompt.
model_id: The ID of the model to link the prompt version to.
"""
return self.store.link_prompt_version_to_model(name, str(version), model_id)
def link_prompt_version_to_run(self, name: str, version: int | str, run_id: str) -> None:
"""
Link a prompt version to a run.
Args:
name: The name of the prompt.
version: The version of the prompt.
run_id: The ID of the run to link the prompt version to.
"""
return self.store.link_prompt_version_to_run(name, str(version), run_id)
def link_prompt_versions_to_trace(
self, prompt_versions: list[PromptVersion], trace_id: str
) -> None:
"""
Link multiple prompt versions to a trace.
Args:
prompt_versions: List of PromptVersion objects to link.
trace_id: Trace ID to link the prompt versions to.
"""
return self.store.link_prompts_to_trace(prompt_versions=prompt_versions, trace_id=trace_id)
def set_prompt_version_tag(self, name: str, version: str, key: str, value: str) -> None:
"""
Set a tag on a prompt version.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
version: Version number of the prompt.
key: Tag key.
value: Tag value.
Returns:
None
"""
self.store.set_prompt_version_tag(name, version, key, value)
def delete_prompt_version_tag(self, name: str, version: str, key: str) -> None:
"""
Delete a tag from a prompt version.
This method delegates directly to the store, providing full Unity Catalog support
when used with Unity Catalog registries.
Args:
name: Name of the prompt.
version: Version number of the prompt.
key: Tag key to delete.
Returns:
None
"""
self.store.delete_prompt_version_tag(name, version, key)
# Webhook APIs
@experimental(version="3.3.0")
@record_usage_event(CreateWebhookEvent)
def create_webhook(
self,
name: str,
url: str,
events: list[WebhookEvent],
description: str | None = None,
secret: str | None = None,
status: WebhookStatus | None = None,
) -> Webhook:
"""
Create a new webhook.
Args:
name: Unique name for the webhook.
url: Webhook endpoint URL.
events: List of event types that trigger this webhook.
description: Optional description of the webhook.
secret: Optional secret for HMAC signature verification.
status: Webhook status (defaults to ACTIVE).
Returns:
A :py:class:`mlflow.entities.webhook.Webhook` object representing the created webhook.
"""
return self.store.create_webhook(name, url, events, description, secret, status)
@experimental(version="3.3.0")
def get_webhook(self, webhook_id: str) -> Webhook:
"""
Get webhook instance by ID.
Args:
webhook_id: Webhook ID.
Returns:
A :py:class:`mlflow.entities.webhook.Webhook` object.
"""
return self.store.get_webhook(webhook_id)
@experimental(version="3.3.0")
def list_webhooks(
self,
max_results: int | None = None,
page_token: str | None = None,
) -> PagedList[Webhook]:
"""
List webhooks.
Args:
max_results: Maximum number of webhooks to return.
page_token: Token specifying the next page of results.
Returns:
A :py:class:`mlflow.store.entities.paged_list.PagedList` of Webhook objects.
"""
return self.store.list_webhooks(max_results, page_token)
@experimental(version="3.3.0")
def update_webhook(
self,
webhook_id: str,
name: str | None = None,
description: str | None = None,
url: str | None = None,
events: list[WebhookEvent] | None = None,
secret: str | None = None,
status: WebhookStatus | None = None,
) -> Webhook:
"""
Update an existing webhook.
Args:
webhook_id: Webhook ID.
name: New webhook name.
description: New webhook description.
url: New webhook URL.
events: New list of event types.
secret: New webhook secret.
status: New webhook status.
Returns:
A :py:class:`mlflow.entities.webhook.Webhook` object representing the updated webhook.
"""
return self.store.update_webhook(webhook_id, name, description, url, events, secret, status)
@experimental(version="3.3.0")
def delete_webhook(self, webhook_id: str) -> None:
"""
Delete a webhook.
Args:
webhook_id: Webhook ID to delete.
Returns:
None
"""
self.store.delete_webhook(webhook_id)
@experimental(version="3.3.0")
def test_webhook(
self, webhook_id: str, event: WebhookEventStr | WebhookEvent | None = None
) -> WebhookTestResult:
"""
Test a webhook by sending a test payload.
Args:
webhook_id: The ID of the webhook to test.
event: Optional event type to test. Can be a WebhookEvent object or a string in
"entity.action" format (e.g., "model_version.created"). If not specified, uses
the first event from webhook.
Returns:
A :py:class:`mlflow.entities.webhook.WebhookTestResult` indicating success/failure and
response details.
"""
# Convert string to WebhookEvent if needed
if isinstance(event, str):
event = WebhookEvent.from_str(event)
return self.store.test_webhook(webhook_id, event)
| ModelRegistryClient |
python | doocs__leetcode | solution/3300-3399/3394.Check if Grid can be Cut into Sections/Solution.py | {
"start": 0,
"end": 1080
} | class ____:
def countLineIntersections(self, coordinates: List[tuple[int, int]]) -> bool:
lines = 0
overlap = 0
for value, marker in coordinates:
if marker == 0:
overlap -= 1
else:
overlap += 1
if overlap == 0:
lines += 1
return lines >= 3
def checkValidCuts(self, n: int, rectangles: List[List[int]]) -> bool:
y_coordinates = []
x_coordinates = []
for rect in rectangles:
x1, y1, x2, y2 = rect
y_coordinates.append((y1, 1)) # start
y_coordinates.append((y2, 0)) # end
x_coordinates.append((x1, 1)) # start
x_coordinates.append((x2, 0)) # end
# Sort by coordinate value, and for tie, put end (0) before start (1)
y_coordinates.sort(key=lambda x: (x[0], x[1]))
x_coordinates.sort(key=lambda x: (x[0], x[1]))
return self.countLineIntersections(
y_coordinates
) or self.countLineIntersections(x_coordinates)
| Solution |
python | crytic__slither | slither/slithir/operations/length.py | {
"start": 632,
"end": 1401
} | class ____(OperationWithLValue):
def __init__(
self,
value: Union[StateVariable, LocalIRVariable, LocalVariable, StateIRVariable],
lvalue: Union[ReferenceVariable, ReferenceVariableSSA],
) -> None:
super().__init__()
assert is_valid_rvalue(value)
assert is_valid_lvalue(lvalue)
self._value = value
self._lvalue = lvalue
lvalue.set_type(ElementaryType("uint256"))
@property
def read(self) -> List[Union[LocalVariable, StateVariable, LocalIRVariable, StateIRVariable]]:
return [self._value]
@property
def value(self) -> Union[StateVariable, LocalVariable]:
return self._value
def __str__(self):
return f"{self.lvalue} -> LENGTH {self.value}"
| Length |
python | doocs__leetcode | solution/1800-1899/1877.Minimize Maximum Pair Sum in Array/Solution.py | {
"start": 0,
"end": 169
} | class ____:
def minPairSum(self, nums: List[int]) -> int:
nums.sort()
return max(x + nums[-i - 1] for i, x in enumerate(nums[: len(nums) >> 1]))
| Solution |
python | django__django | tests/i18n/test_management.py | {
"start": 126,
"end": 1046
} | class ____(SimpleTestCase):
def test_repr(self):
dirpath = "dir"
file_name = "example"
trans_file = TranslatableFile(
dirpath=dirpath, file_name=file_name, locale_dir=None
)
self.assertEqual(
repr(trans_file),
"<TranslatableFile: %s>" % os.path.join(dirpath, file_name),
)
def test_eq(self):
dirpath = "dir"
file_name = "example"
trans_file = TranslatableFile(
dirpath=dirpath, file_name=file_name, locale_dir=None
)
trans_file_eq = TranslatableFile(
dirpath=dirpath, file_name=file_name, locale_dir=None
)
trans_file_not_eq = TranslatableFile(
dirpath="tmp", file_name=file_name, locale_dir=None
)
self.assertEqual(trans_file, trans_file_eq)
self.assertNotEqual(trans_file, trans_file_not_eq)
| TranslatableFileTests |
python | redis__redis-py | redis/commands/core.py | {
"start": 188499,
"end": 213557
} | class ____(CommandsProtocol):
"""
Redis commands for Hash data type.
see: https://redis.io/topics/data-types-intro#redis-hashes
"""
def hdel(self, name: str, *keys: str) -> Union[Awaitable[int], int]:
"""
Delete ``keys`` from hash ``name``
For more information, see https://redis.io/commands/hdel
"""
return self.execute_command("HDEL", name, *keys)
def hexists(self, name: str, key: str) -> Union[Awaitable[bool], bool]:
"""
Returns a boolean indicating if ``key`` exists within hash ``name``
For more information, see https://redis.io/commands/hexists
"""
return self.execute_command("HEXISTS", name, key, keys=[name])
def hget(
self, name: str, key: str
) -> Union[Awaitable[Optional[str]], Optional[str]]:
"""
Return the value of ``key`` within the hash ``name``
For more information, see https://redis.io/commands/hget
"""
return self.execute_command("HGET", name, key, keys=[name])
def hgetall(self, name: str) -> Union[Awaitable[dict], dict]:
"""
Return a Python dict of the hash's name/value pairs
For more information, see https://redis.io/commands/hgetall
"""
return self.execute_command("HGETALL", name, keys=[name])
def hgetdel(
self, name: str, *keys: str
) -> Union[
Awaitable[Optional[List[Union[str, bytes]]]], Optional[List[Union[str, bytes]]]
]:
"""
Return the value of ``key`` within the hash ``name`` and
delete the field in the hash.
This command is similar to HGET, except for the fact that it also deletes
the key on success from the hash with the provided ```name```.
Available since Redis 8.0
For more information, see https://redis.io/commands/hgetdel
"""
if len(keys) == 0:
raise DataError("'hgetdel' should have at least one key provided")
return self.execute_command("HGETDEL", name, "FIELDS", len(keys), *keys)
def hgetex(
self,
name: KeyT,
*keys: str,
ex: Optional[ExpiryT] = None,
px: Optional[ExpiryT] = None,
exat: Optional[AbsExpiryT] = None,
pxat: Optional[AbsExpiryT] = None,
persist: bool = False,
) -> Union[
Awaitable[Optional[List[Union[str, bytes]]]], Optional[List[Union[str, bytes]]]
]:
"""
Return the values of ``key`` and ``keys`` within the hash ``name``
and optionally set their expiration.
``ex`` sets an expire flag on ``kyes`` for ``ex`` seconds.
``px`` sets an expire flag on ``keys`` for ``px`` milliseconds.
``exat`` sets an expire flag on ``keys`` for ``ex`` seconds,
specified in unix time.
``pxat`` sets an expire flag on ``keys`` for ``ex`` milliseconds,
specified in unix time.
``persist`` remove the time to live associated with the ``keys``.
Available since Redis 8.0
For more information, see https://redis.io/commands/hgetex
"""
if not keys:
raise DataError("'hgetex' should have at least one key provided")
if not at_most_one_value_set((ex, px, exat, pxat, persist)):
raise DataError(
"``ex``, ``px``, ``exat``, ``pxat``, "
"and ``persist`` are mutually exclusive."
)
exp_options: list[EncodableT] = extract_expire_flags(ex, px, exat, pxat)
if persist:
exp_options.append("PERSIST")
return self.execute_command(
"HGETEX",
name,
*exp_options,
"FIELDS",
len(keys),
*keys,
)
def hincrby(
self, name: str, key: str, amount: int = 1
) -> Union[Awaitable[int], int]:
"""
Increment the value of ``key`` in hash ``name`` by ``amount``
For more information, see https://redis.io/commands/hincrby
"""
return self.execute_command("HINCRBY", name, key, amount)
def hincrbyfloat(
self, name: str, key: str, amount: float = 1.0
) -> Union[Awaitable[float], float]:
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
For more information, see https://redis.io/commands/hincrbyfloat
"""
return self.execute_command("HINCRBYFLOAT", name, key, amount)
def hkeys(self, name: str) -> Union[Awaitable[List], List]:
"""
Return the list of keys within hash ``name``
For more information, see https://redis.io/commands/hkeys
"""
return self.execute_command("HKEYS", name, keys=[name])
def hlen(self, name: str) -> Union[Awaitable[int], int]:
"""
Return the number of elements in hash ``name``
For more information, see https://redis.io/commands/hlen
"""
return self.execute_command("HLEN", name, keys=[name])
def hset(
self,
name: str,
key: Optional[str] = None,
value: Optional[str] = None,
mapping: Optional[dict] = None,
items: Optional[list] = None,
) -> Union[Awaitable[int], int]:
"""
Set ``key`` to ``value`` within hash ``name``,
``mapping`` accepts a dict of key/value pairs that will be
added to hash ``name``.
``items`` accepts a list of key/value pairs that will be
added to hash ``name``.
Returns the number of fields that were added.
For more information, see https://redis.io/commands/hset
"""
if key is None and not mapping and not items:
raise DataError("'hset' with no key value pairs")
pieces = []
if items:
pieces.extend(items)
if key is not None:
pieces.extend((key, value))
if mapping:
for pair in mapping.items():
pieces.extend(pair)
return self.execute_command("HSET", name, *pieces)
def hsetex(
self,
name: str,
key: Optional[str] = None,
value: Optional[str] = None,
mapping: Optional[dict] = None,
items: Optional[list] = None,
ex: Optional[ExpiryT] = None,
px: Optional[ExpiryT] = None,
exat: Optional[AbsExpiryT] = None,
pxat: Optional[AbsExpiryT] = None,
data_persist_option: Optional[HashDataPersistOptions] = None,
keepttl: bool = False,
) -> Union[Awaitable[int], int]:
"""
Set ``key`` to ``value`` within hash ``name``
``mapping`` accepts a dict of key/value pairs that will be
added to hash ``name``.
``items`` accepts a list of key/value pairs that will be
added to hash ``name``.
``ex`` sets an expire flag on ``keys`` for ``ex`` seconds.
``px`` sets an expire flag on ``keys`` for ``px`` milliseconds.
``exat`` sets an expire flag on ``keys`` for ``ex`` seconds,
specified in unix time.
``pxat`` sets an expire flag on ``keys`` for ``ex`` milliseconds,
specified in unix time.
``data_persist_option`` can be set to ``FNX`` or ``FXX`` to control the
behavior of the command.
``FNX`` will set the value for each provided key to each
provided value only if all do not already exist.
``FXX`` will set the value for each provided key to each
provided value only if all already exist.
``keepttl`` if True, retain the time to live associated with the keys.
Returns the number of fields that were added.
Available since Redis 8.0
For more information, see https://redis.io/commands/hsetex
"""
if key is None and not mapping and not items:
raise DataError("'hsetex' with no key value pairs")
if items and len(items) % 2 != 0:
raise DataError(
"'hsetex' with odd number of items. "
"'items' must contain a list of key/value pairs."
)
if not at_most_one_value_set((ex, px, exat, pxat, keepttl)):
raise DataError(
"``ex``, ``px``, ``exat``, ``pxat``, "
"and ``keepttl`` are mutually exclusive."
)
exp_options: list[EncodableT] = extract_expire_flags(ex, px, exat, pxat)
if data_persist_option:
exp_options.append(data_persist_option.value)
if keepttl:
exp_options.append("KEEPTTL")
pieces = []
if items:
pieces.extend(items)
if key is not None:
pieces.extend((key, value))
if mapping:
for pair in mapping.items():
pieces.extend(pair)
return self.execute_command(
"HSETEX", name, *exp_options, "FIELDS", int(len(pieces) / 2), *pieces
)
def hsetnx(self, name: str, key: str, value: str) -> Union[Awaitable[bool], bool]:
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
For more information, see https://redis.io/commands/hsetnx
"""
return self.execute_command("HSETNX", name, key, value)
@deprecated_function(
version="4.0.0",
reason="Use 'hset' instead.",
name="hmset",
)
def hmset(self, name: str, mapping: dict) -> Union[Awaitable[str], str]:
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
For more information, see https://redis.io/commands/hmset
"""
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in mapping.items():
items.extend(pair)
return self.execute_command("HMSET", name, *items)
def hmget(self, name: str, keys: List, *args: List) -> Union[Awaitable[List], List]:
"""
Returns a list of values ordered identically to ``keys``
For more information, see https://redis.io/commands/hmget
"""
args = list_or_args(keys, args)
return self.execute_command("HMGET", name, *args, keys=[name])
def hvals(self, name: str) -> Union[Awaitable[List], List]:
"""
Return the list of values within hash ``name``
For more information, see https://redis.io/commands/hvals
"""
return self.execute_command("HVALS", name, keys=[name])
def hstrlen(self, name: str, key: str) -> Union[Awaitable[int], int]:
"""
Return the number of bytes stored in the value of ``key``
within hash ``name``
For more information, see https://redis.io/commands/hstrlen
"""
return self.execute_command("HSTRLEN", name, key, keys=[name])
def hexpire(
self,
name: KeyT,
seconds: ExpiryT,
*fields: str,
nx: bool = False,
xx: bool = False,
gt: bool = False,
lt: bool = False,
) -> ResponseT:
"""
Sets or updates the expiration time for fields within a hash key, using relative
time in seconds.
If a field already has an expiration time, the behavior of the update can be
controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
The return value provides detailed information about the outcome for each field.
For more information, see https://redis.io/commands/hexpire
Args:
name: The name of the hash key.
seconds: Expiration time in seconds, relative. Can be an integer, or a
Python `timedelta` object.
fields: List of fields within the hash to apply the expiration time to.
nx: Set expiry only when the field has no expiry.
xx: Set expiry only when the field has an existing expiry.
gt: Set expiry only when the new expiry is greater than the current one.
lt: Set expiry only when the new expiry is less than the current one.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `0` if the specified NX | XX | GT | LT condition was not met.
- `1` if the expiration time was set or updated.
- `2` if the field was deleted because the specified expiration time is
in the past.
"""
conditions = [nx, xx, gt, lt]
if sum(conditions) > 1:
raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
if isinstance(seconds, datetime.timedelta):
seconds = int(seconds.total_seconds())
options = []
if nx:
options.append("NX")
if xx:
options.append("XX")
if gt:
options.append("GT")
if lt:
options.append("LT")
return self.execute_command(
"HEXPIRE", name, seconds, *options, "FIELDS", len(fields), *fields
)
def hpexpire(
self,
name: KeyT,
milliseconds: ExpiryT,
*fields: str,
nx: bool = False,
xx: bool = False,
gt: bool = False,
lt: bool = False,
) -> ResponseT:
"""
Sets or updates the expiration time for fields within a hash key, using relative
time in milliseconds.
If a field already has an expiration time, the behavior of the update can be
controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
The return value provides detailed information about the outcome for each field.
For more information, see https://redis.io/commands/hpexpire
Args:
name: The name of the hash key.
milliseconds: Expiration time in milliseconds, relative. Can be an integer,
or a Python `timedelta` object.
fields: List of fields within the hash to apply the expiration time to.
nx: Set expiry only when the field has no expiry.
xx: Set expiry only when the field has an existing expiry.
gt: Set expiry only when the new expiry is greater than the current one.
lt: Set expiry only when the new expiry is less than the current one.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `0` if the specified NX | XX | GT | LT condition was not met.
- `1` if the expiration time was set or updated.
- `2` if the field was deleted because the specified expiration time is
in the past.
"""
conditions = [nx, xx, gt, lt]
if sum(conditions) > 1:
raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
if isinstance(milliseconds, datetime.timedelta):
milliseconds = int(milliseconds.total_seconds() * 1000)
options = []
if nx:
options.append("NX")
if xx:
options.append("XX")
if gt:
options.append("GT")
if lt:
options.append("LT")
return self.execute_command(
"HPEXPIRE", name, milliseconds, *options, "FIELDS", len(fields), *fields
)
def hexpireat(
self,
name: KeyT,
unix_time_seconds: AbsExpiryT,
*fields: str,
nx: bool = False,
xx: bool = False,
gt: bool = False,
lt: bool = False,
) -> ResponseT:
"""
Sets or updates the expiration time for fields within a hash key, using an
absolute Unix timestamp in seconds.
If a field already has an expiration time, the behavior of the update can be
controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
The return value provides detailed information about the outcome for each field.
For more information, see https://redis.io/commands/hexpireat
Args:
name: The name of the hash key.
unix_time_seconds: Expiration time as Unix timestamp in seconds. Can be an
integer or a Python `datetime` object.
fields: List of fields within the hash to apply the expiration time to.
nx: Set expiry only when the field has no expiry.
xx: Set expiry only when the field has an existing expiration time.
gt: Set expiry only when the new expiry is greater than the current one.
lt: Set expiry only when the new expiry is less than the current one.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `0` if the specified NX | XX | GT | LT condition was not met.
- `1` if the expiration time was set or updated.
- `2` if the field was deleted because the specified expiration time is
in the past.
"""
conditions = [nx, xx, gt, lt]
if sum(conditions) > 1:
raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
if isinstance(unix_time_seconds, datetime.datetime):
unix_time_seconds = int(unix_time_seconds.timestamp())
options = []
if nx:
options.append("NX")
if xx:
options.append("XX")
if gt:
options.append("GT")
if lt:
options.append("LT")
return self.execute_command(
"HEXPIREAT",
name,
unix_time_seconds,
*options,
"FIELDS",
len(fields),
*fields,
)
def hpexpireat(
self,
name: KeyT,
unix_time_milliseconds: AbsExpiryT,
*fields: str,
nx: bool = False,
xx: bool = False,
gt: bool = False,
lt: bool = False,
) -> ResponseT:
"""
Sets or updates the expiration time for fields within a hash key, using an
absolute Unix timestamp in milliseconds.
If a field already has an expiration time, the behavior of the update can be
controlled using the `nx`, `xx`, `gt`, and `lt` parameters.
The return value provides detailed information about the outcome for each field.
For more information, see https://redis.io/commands/hpexpireat
Args:
name: The name of the hash key.
unix_time_milliseconds: Expiration time as Unix timestamp in milliseconds.
Can be an integer or a Python `datetime` object.
fields: List of fields within the hash to apply the expiry.
nx: Set expiry only when the field has no expiry.
xx: Set expiry only when the field has an existing expiry.
gt: Set expiry only when the new expiry is greater than the current one.
lt: Set expiry only when the new expiry is less than the current one.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `0` if the specified NX | XX | GT | LT condition was not met.
- `1` if the expiration time was set or updated.
- `2` if the field was deleted because the specified expiration time is
in the past.
"""
conditions = [nx, xx, gt, lt]
if sum(conditions) > 1:
raise ValueError("Only one of 'nx', 'xx', 'gt', 'lt' can be specified.")
if isinstance(unix_time_milliseconds, datetime.datetime):
unix_time_milliseconds = int(unix_time_milliseconds.timestamp() * 1000)
options = []
if nx:
options.append("NX")
if xx:
options.append("XX")
if gt:
options.append("GT")
if lt:
options.append("LT")
return self.execute_command(
"HPEXPIREAT",
name,
unix_time_milliseconds,
*options,
"FIELDS",
len(fields),
*fields,
)
def hpersist(self, name: KeyT, *fields: str) -> ResponseT:
"""
Removes the expiration time for each specified field in a hash.
For more information, see https://redis.io/commands/hpersist
Args:
name: The name of the hash key.
fields: A list of fields within the hash from which to remove the
expiration time.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `-1` if the field exists but has no associated expiration time.
- `1` if the expiration time was successfully removed from the field.
"""
return self.execute_command("HPERSIST", name, "FIELDS", len(fields), *fields)
def hexpiretime(self, key: KeyT, *fields: str) -> ResponseT:
"""
Returns the expiration times of hash fields as Unix timestamps in seconds.
For more information, see https://redis.io/commands/hexpiretime
Args:
key: The hash key.
fields: A list of fields within the hash for which to get the expiration
time.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `-1` if the field exists but has no associated expire time.
- A positive integer representing the expiration Unix timestamp in
seconds, if the field has an associated expiration time.
"""
return self.execute_command(
"HEXPIRETIME", key, "FIELDS", len(fields), *fields, keys=[key]
)
def hpexpiretime(self, key: KeyT, *fields: str) -> ResponseT:
"""
Returns the expiration times of hash fields as Unix timestamps in milliseconds.
For more information, see https://redis.io/commands/hpexpiretime
Args:
key: The hash key.
fields: A list of fields within the hash for which to get the expiration
time.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `-1` if the field exists but has no associated expire time.
- A positive integer representing the expiration Unix timestamp in
milliseconds, if the field has an associated expiration time.
"""
return self.execute_command(
"HPEXPIRETIME", key, "FIELDS", len(fields), *fields, keys=[key]
)
def httl(self, key: KeyT, *fields: str) -> ResponseT:
"""
Returns the TTL (Time To Live) in seconds for each specified field within a hash
key.
For more information, see https://redis.io/commands/httl
Args:
key: The hash key.
fields: A list of fields within the hash for which to get the TTL.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `-1` if the field exists but has no associated expire time.
- A positive integer representing the TTL in seconds if the field has
an associated expiration time.
"""
return self.execute_command(
"HTTL", key, "FIELDS", len(fields), *fields, keys=[key]
)
def hpttl(self, key: KeyT, *fields: str) -> ResponseT:
"""
Returns the TTL (Time To Live) in milliseconds for each specified field within a
hash key.
For more information, see https://redis.io/commands/hpttl
Args:
key: The hash key.
fields: A list of fields within the hash for which to get the TTL.
Returns:
Returns a list which contains for each field in the request:
- `-2` if the field does not exist, or if the key does not exist.
- `-1` if the field exists but has no associated expire time.
- A positive integer representing the TTL in milliseconds if the field
has an associated expiration time.
"""
return self.execute_command(
"HPTTL", key, "FIELDS", len(fields), *fields, keys=[key]
)
AsyncHashCommands = HashCommands
| HashCommands |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1302404,
"end": 1302596
} | class ____(VegaLiteSchema):
"""SymbolShape schema wrapper."""
_schema = {"$ref": "#/definitions/SymbolShape"}
def __init__(self, *args):
super().__init__(*args)
| SymbolShape |
python | bokeh__bokeh | release/action.py | {
"start": 1124,
"end": 1225
} | class ____(ActionReturn):
""""""
kind = ActionResult.PASS
ui = staticmethod(passed)
| PASSED |
python | redis__redis-py | redis/_parsers/commands.py | {
"start": 286,
"end": 573
} | class ____(Enum):
ALL_NODES = "all_nodes"
ALL_SHARDS = "all_shards"
ALL_REPLICAS = "all_replicas"
MULTI_SHARD = "multi_shard"
SPECIAL = "special"
DEFAULT_KEYLESS = "default_keyless"
DEFAULT_KEYED = "default_keyed"
DEFAULT_NODE = "default_node"
| RequestPolicy |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 9033,
"end": 9138
} | class ____(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
| _OracleBoolean |
python | Pylons__pyramid | src/pyramid/config/views.py | {
"start": 85483,
"end": 93315
} | class ____:
def __init__(self):
self.registrations = []
self.cache_busters = []
def generate(self, path, request, **kw):
for url, spec, route_name in self.registrations:
if path.startswith(spec):
subpath = path[len(spec) :]
if WIN: # pragma: no cover
subpath = subpath.replace('\\', '/') # windows
if self.cache_busters:
subpath, kw = self._bust_asset_path(
request, spec, subpath, kw
)
if url is None:
kw['subpath'] = subpath
return request.route_url(route_name, **kw)
else:
app_url, qs, anchor = parse_url_overrides(request, kw)
parsed = urlparse(url)
if not parsed.scheme:
url = urlunparse(
parsed._replace(scheme=request.scheme)
)
subpath = quote(subpath)
result = urljoin(url, subpath)
return result + qs + anchor
raise ValueError('No static URL definition matching %s' % path)
def add(self, config, name, spec, **extra):
# This feature only allows for the serving of a directory and
# the files contained within, not of a single asset;
# appending a slash here if the spec doesn't have one is
# required for proper prefix matching done in ``generate``
# (``subpath = path[len(spec):]``).
if os.path.isabs(spec): # FBO windows
sep = os.sep
else:
sep = '/'
if not spec.endswith(sep) and not spec.endswith(':'):
spec = spec + sep
# we also make sure the name ends with a slash, purely as a
# convenience: a name that is a url is required to end in a
# slash, so that ``urljoin(name, subpath))`` will work above
# when the name is a URL, and it doesn't hurt things for it to
# have a name that ends in a slash if it's used as a route
# name instead of a URL.
if not name.endswith('/'):
# make sure it ends with a slash
name = name + '/'
if urlparse(name).netloc:
# it's a URL
# url, spec, route_name
url = name
route_name = None
else:
# it's a view name
url = None
cache_max_age = extra.pop('cache_max_age', None)
content_encodings = extra.pop('content_encodings', [])
# create a view
view = static_view(
spec,
cache_max_age=cache_max_age,
use_subpath=True,
reload=config.registry.settings['pyramid.reload_assets'],
content_encodings=content_encodings,
)
# Mutate extra to allow factory, etc to be passed through here.
# Treat permission specially because we'd like to default to
# permissiveness (see docs of config.add_static_view).
permission = extra.pop('permission', None)
if permission is None:
permission = NO_PERMISSION_REQUIRED
context = extra.pop('context', None)
if context is None:
context = extra.pop('for_', None)
renderer = extra.pop('renderer', None)
# register a route using the computed view, permission, and
# pattern, plus any extras passed to us via add_static_view
pattern = "%s*subpath" % name # name already ends with slash
if config.route_prefix:
route_name = f'__{config.route_prefix}/{name}'
else:
route_name = '__%s' % name
config.add_route(route_name, pattern, **extra)
config.add_view(
route_name=route_name,
view=view,
permission=permission,
context=context,
renderer=renderer,
)
def register():
registrations = self.registrations
names = [t[0] for t in registrations]
if name in names:
idx = names.index(name)
registrations.pop(idx)
# url, spec, route_name
registrations.append((url, spec, route_name))
intr = config.introspectable(
'static views', name, 'static view for %r' % name, 'static view'
)
intr['name'] = name
intr['spec'] = spec
config.action(None, callable=register, introspectables=(intr,))
def add_cache_buster(self, config, spec, cachebust, explicit=False):
# ensure the spec always has a trailing slash as we only support
# adding cache busters to folders, not files
if os.path.isabs(spec): # FBO windows
sep = os.sep
else:
sep = '/'
if not spec.endswith(sep) and not spec.endswith(':'):
spec = spec + sep
def register():
if config.registry.settings.get('pyramid.prevent_cachebust'):
return
cache_busters = self.cache_busters
# find duplicate cache buster (old_idx)
# and insertion location (new_idx)
new_idx, old_idx = len(cache_busters), None
for idx, (spec_, cb_, explicit_) in enumerate(cache_busters):
# if we find an identical (spec, explicit) then use it
if spec == spec_ and explicit == explicit_:
old_idx = new_idx = idx
break
# past all explicit==False specs then add to the end
elif not explicit and explicit_:
new_idx = idx
break
# explicit matches and spec is shorter
elif explicit == explicit_ and len(spec) < len(spec_):
new_idx = idx
break
if old_idx is not None:
cache_busters.pop(old_idx)
cache_busters.insert(new_idx, (spec, cachebust, explicit))
intr = config.introspectable(
'cache busters', spec, 'cache buster for %r' % spec, 'cache buster'
)
intr['cachebust'] = cachebust
intr['path'] = spec
intr['explicit'] = explicit
config.action(None, callable=register, introspectables=(intr,))
def _bust_asset_path(self, request, spec, subpath, kw):
registry = request.registry
pkg_name, pkg_subpath = resolve_asset_spec(spec)
rawspec = None
if pkg_name is not None:
pathspec = f'{pkg_name}:{pkg_subpath}{subpath}'
overrides = registry.queryUtility(IPackageOverrides, name=pkg_name)
if overrides is not None:
resource_name = posixpath.join(pkg_subpath, subpath)
sources = overrides.filtered_sources(resource_name)
for source, filtered_path in sources:
rawspec = source.get_path(filtered_path)
if hasattr(source, 'pkg_name'):
rawspec = f'{source.pkg_name}:{rawspec}'
break
else:
pathspec = pkg_subpath + subpath
if rawspec is None:
rawspec = pathspec
kw['pathspec'] = pathspec
kw['rawspec'] = rawspec
for spec_, cachebust, explicit in reversed(self.cache_busters):
if (explicit and rawspec.startswith(spec_)) or (
not explicit and pathspec.startswith(spec_)
):
subpath, kw = cachebust(request, subpath, kw)
break
return subpath, kw
| StaticURLInfo |
python | django__django | tests/admin_filters/tests.py | {
"start": 3058,
"end": 3210
} | class ____(DecadeListFilter):
title = "publication decade"
parameter_name = "decade__in" # Ends with '__in"
| DecadeListFilterParameterEndsWith__In |
python | dateutil__dateutil | tests/test_relativedelta.py | {
"start": 262,
"end": 27638
} | class ____(unittest.TestCase):
now = datetime(2003, 9, 17, 20, 54, 47, 282310)
today = date(2003, 9, 17)
def testInheritance(self):
# Ensure that relativedelta is inheritance-friendly.
class rdChildClass(relativedelta):
pass
ccRD = rdChildClass(years=1, months=1, days=1, leapdays=1, weeks=1,
hours=1, minutes=1, seconds=1, microseconds=1)
rd = relativedelta(years=1, months=1, days=1, leapdays=1, weeks=1,
hours=1, minutes=1, seconds=1, microseconds=1)
self.assertEqual(type(ccRD + rd), type(ccRD),
msg='Addition does not inherit type.')
self.assertEqual(type(ccRD - rd), type(ccRD),
msg='Subtraction does not inherit type.')
self.assertEqual(type(-ccRD), type(ccRD),
msg='Negation does not inherit type.')
self.assertEqual(type(ccRD * 5.0), type(ccRD),
msg='Multiplication does not inherit type.')
self.assertEqual(type(ccRD / 5.0), type(ccRD),
msg='Division does not inherit type.')
def testMonthEndMonthBeginning(self):
self.assertEqual(relativedelta(datetime(2003, 1, 31, 23, 59, 59),
datetime(2003, 3, 1, 0, 0, 0)),
relativedelta(months=-1, seconds=-1))
self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
datetime(2003, 1, 31, 23, 59, 59)),
relativedelta(months=1, seconds=1))
def testMonthEndMonthBeginningLeapYear(self):
self.assertEqual(relativedelta(datetime(2012, 1, 31, 23, 59, 59),
datetime(2012, 3, 1, 0, 0, 0)),
relativedelta(months=-1, seconds=-1))
self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
datetime(2003, 1, 31, 23, 59, 59)),
relativedelta(months=1, seconds=1))
def testNextMonth(self):
self.assertEqual(self.now+relativedelta(months=+1),
datetime(2003, 10, 17, 20, 54, 47, 282310))
def testNextMonthPlusOneWeek(self):
self.assertEqual(self.now+relativedelta(months=+1, weeks=+1),
datetime(2003, 10, 24, 20, 54, 47, 282310))
def testNextMonthPlusOneWeek10am(self):
self.assertEqual(self.today +
relativedelta(months=+1, weeks=+1, hour=10),
datetime(2003, 10, 24, 10, 0))
def testNextMonthPlusOneWeek10amDiff(self):
self.assertEqual(relativedelta(datetime(2003, 10, 24, 10, 0),
self.today),
relativedelta(months=+1, days=+7, hours=+10))
def testOneMonthBeforeOneYear(self):
self.assertEqual(self.now+relativedelta(years=+1, months=-1),
datetime(2004, 8, 17, 20, 54, 47, 282310))
def testMonthsOfDiffNumOfDays(self):
self.assertEqual(date(2003, 1, 27)+relativedelta(months=+1),
date(2003, 2, 27))
self.assertEqual(date(2003, 1, 31)+relativedelta(months=+1),
date(2003, 2, 28))
self.assertEqual(date(2003, 1, 31)+relativedelta(months=+2),
date(2003, 3, 31))
def testMonthsOfDiffNumOfDaysWithYears(self):
self.assertEqual(date(2000, 2, 28)+relativedelta(years=+1),
date(2001, 2, 28))
self.assertEqual(date(2000, 2, 29)+relativedelta(years=+1),
date(2001, 2, 28))
self.assertEqual(date(1999, 2, 28)+relativedelta(years=+1),
date(2000, 2, 28))
self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1),
date(2000, 3, 1))
self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1),
date(2000, 3, 1))
self.assertEqual(date(2001, 2, 28)+relativedelta(years=-1),
date(2000, 2, 28))
self.assertEqual(date(2001, 3, 1)+relativedelta(years=-1),
date(2000, 3, 1))
def testNextFriday(self):
self.assertEqual(self.today+relativedelta(weekday=FR),
date(2003, 9, 19))
def testNextFridayInt(self):
self.assertEqual(self.today+relativedelta(weekday=calendar.FRIDAY),
date(2003, 9, 19))
def testLastFridayInThisMonth(self):
self.assertEqual(self.today+relativedelta(day=31, weekday=FR(-1)),
date(2003, 9, 26))
def testLastDayOfFebruary(self):
self.assertEqual(date(2021, 2, 1) + relativedelta(day=31),
date(2021, 2, 28))
def testLastDayOfFebruaryLeapYear(self):
self.assertEqual(date(2020, 2, 1) + relativedelta(day=31),
date(2020, 2, 29))
def testNextWednesdayIsToday(self):
self.assertEqual(self.today+relativedelta(weekday=WE),
date(2003, 9, 17))
def testNextWednesdayNotToday(self):
self.assertEqual(self.today+relativedelta(days=+1, weekday=WE),
date(2003, 9, 24))
def testAddMoreThan12Months(self):
self.assertEqual(date(2003, 12, 1) + relativedelta(months=+13),
date(2005, 1, 1))
def testAddNegativeMonths(self):
self.assertEqual(date(2003, 1, 1) + relativedelta(months=-2),
date(2002, 11, 1))
def test15thISOYearWeek(self):
self.assertEqual(date(2003, 1, 1) +
relativedelta(day=4, weeks=+14, weekday=MO(-1)),
date(2003, 4, 7))
def testMillenniumAge(self):
self.assertEqual(relativedelta(self.now, date(2001, 1, 1)),
relativedelta(years=+2, months=+8, days=+16,
hours=+20, minutes=+54, seconds=+47,
microseconds=+282310))
def testJohnAge(self):
self.assertEqual(relativedelta(self.now,
datetime(1978, 4, 5, 12, 0)),
relativedelta(years=+25, months=+5, days=+12,
hours=+8, minutes=+54, seconds=+47,
microseconds=+282310))
def testJohnAgeWithDate(self):
self.assertEqual(relativedelta(self.today,
datetime(1978, 4, 5, 12, 0)),
relativedelta(years=+25, months=+5, days=+11,
hours=+12))
def testYearDay(self):
self.assertEqual(date(2003, 1, 1)+relativedelta(yearday=260),
date(2003, 9, 17))
self.assertEqual(date(2002, 1, 1)+relativedelta(yearday=260),
date(2002, 9, 17))
self.assertEqual(date(2000, 1, 1)+relativedelta(yearday=260),
date(2000, 9, 16))
self.assertEqual(self.today+relativedelta(yearday=261),
date(2003, 9, 18))
def testYearDayBug(self):
# Tests a problem reported by Adam Ryan.
self.assertEqual(date(2010, 1, 1)+relativedelta(yearday=15),
date(2010, 1, 15))
def testNonLeapYearDay(self):
self.assertEqual(date(2003, 1, 1)+relativedelta(nlyearday=260),
date(2003, 9, 17))
self.assertEqual(date(2002, 1, 1)+relativedelta(nlyearday=260),
date(2002, 9, 17))
self.assertEqual(date(2000, 1, 1)+relativedelta(nlyearday=260),
date(2000, 9, 17))
self.assertEqual(self.today+relativedelta(yearday=261),
date(2003, 9, 18))
def testAddition(self):
self.assertEqual(relativedelta(days=10) +
relativedelta(years=1, months=2, days=3, hours=4,
minutes=5, microseconds=6),
relativedelta(years=1, months=2, days=13, hours=4,
minutes=5, microseconds=6))
def testAbsoluteAddition(self):
self.assertEqual(relativedelta() + relativedelta(day=0, hour=0),
relativedelta(day=0, hour=0))
self.assertEqual(relativedelta(day=0, hour=0) + relativedelta(),
relativedelta(day=0, hour=0))
def testAdditionToDatetime(self):
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1),
datetime(2000, 1, 2))
def testRightAdditionToDatetime(self):
self.assertEqual(relativedelta(days=1) + datetime(2000, 1, 1),
datetime(2000, 1, 2))
def testAdditionInvalidType(self):
with self.assertRaises(TypeError):
relativedelta(days=3) + 9
def testAdditionUnsupportedType(self):
# For unsupported types that define their own comparators, etc.
self.assertIs(relativedelta(days=1) + NotAValue, NotAValue)
def testAdditionFloatValue(self):
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=float(1)),
datetime(2000, 1, 2))
self.assertEqual(datetime(2000, 1, 1) + relativedelta(months=float(1)),
datetime(2000, 2, 1))
self.assertEqual(datetime(2000, 1, 1) + relativedelta(years=float(1)),
datetime(2001, 1, 1))
def testAdditionFloatFractionals(self):
self.assertEqual(datetime(2000, 1, 1, 0) +
relativedelta(days=float(0.5)),
datetime(2000, 1, 1, 12))
self.assertEqual(datetime(2000, 1, 1, 0, 0) +
relativedelta(hours=float(0.5)),
datetime(2000, 1, 1, 0, 30))
self.assertEqual(datetime(2000, 1, 1, 0, 0, 0) +
relativedelta(minutes=float(0.5)),
datetime(2000, 1, 1, 0, 0, 30))
self.assertEqual(datetime(2000, 1, 1, 0, 0, 0, 0) +
relativedelta(seconds=float(0.5)),
datetime(2000, 1, 1, 0, 0, 0, 500000))
self.assertEqual(datetime(2000, 1, 1, 0, 0, 0, 0) +
relativedelta(microseconds=float(500000.25)),
datetime(2000, 1, 1, 0, 0, 0, 500000))
def testSubtraction(self):
self.assertEqual(relativedelta(days=10) -
relativedelta(years=1, months=2, days=3, hours=4,
minutes=5, microseconds=6),
relativedelta(years=-1, months=-2, days=7, hours=-4,
minutes=-5, microseconds=-6))
def testRightSubtractionFromDatetime(self):
self.assertEqual(datetime(2000, 1, 2) - relativedelta(days=1),
datetime(2000, 1, 1))
def testSubractionWithDatetime(self):
self.assertRaises(TypeError, lambda x, y: x - y,
(relativedelta(days=1), datetime(2000, 1, 1)))
def testSubtractionInvalidType(self):
with self.assertRaises(TypeError):
relativedelta(hours=12) - 14
def testSubtractionUnsupportedType(self):
self.assertIs(relativedelta(days=1) + NotAValue, NotAValue)
def testMultiplication(self):
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1) * 28,
datetime(2000, 1, 29))
self.assertEqual(datetime(2000, 1, 1) + 28 * relativedelta(days=1),
datetime(2000, 1, 29))
def testMultiplicationUnsupportedType(self):
self.assertIs(relativedelta(days=1) * NotAValue, NotAValue)
def testDivision(self):
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=28) / 28,
datetime(2000, 1, 2))
def testDivisionUnsupportedType(self):
self.assertIs(relativedelta(days=1) / NotAValue, NotAValue)
def testBoolean(self):
self.assertFalse(relativedelta(days=0))
self.assertTrue(relativedelta(days=1))
def testAbsoluteValueNegative(self):
rd_base = relativedelta(years=-1, months=-5, days=-2, hours=-3,
minutes=-5, seconds=-2, microseconds=-12)
rd_expected = relativedelta(years=1, months=5, days=2, hours=3,
minutes=5, seconds=2, microseconds=12)
self.assertEqual(abs(rd_base), rd_expected)
def testAbsoluteValuePositive(self):
rd_base = relativedelta(years=1, months=5, days=2, hours=3,
minutes=5, seconds=2, microseconds=12)
rd_expected = rd_base
self.assertEqual(abs(rd_base), rd_expected)
def testComparison(self):
d1 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
minutes=1, seconds=1, microseconds=1)
d2 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
minutes=1, seconds=1, microseconds=1)
d3 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
minutes=1, seconds=1, microseconds=2)
self.assertEqual(d1, d2)
self.assertNotEqual(d1, d3)
def testInequalityTypeMismatch(self):
# Different type
self.assertFalse(relativedelta(year=1) == 19)
def testInequalityUnsupportedType(self):
self.assertIs(relativedelta(hours=3) == NotAValue, NotAValue)
def testInequalityWeekdays(self):
# Different weekdays
no_wday = relativedelta(year=1997, month=4)
wday_mo_1 = relativedelta(year=1997, month=4, weekday=MO(+1))
wday_mo_2 = relativedelta(year=1997, month=4, weekday=MO(+2))
wday_tu = relativedelta(year=1997, month=4, weekday=TU)
self.assertTrue(wday_mo_1 == wday_mo_1)
self.assertFalse(no_wday == wday_mo_1)
self.assertFalse(wday_mo_1 == no_wday)
self.assertFalse(wday_mo_1 == wday_mo_2)
self.assertFalse(wday_mo_2 == wday_mo_1)
self.assertFalse(wday_mo_1 == wday_tu)
self.assertFalse(wday_tu == wday_mo_1)
def testMonthOverflow(self):
self.assertEqual(relativedelta(months=273),
relativedelta(years=22, months=9))
def testWeeks(self):
# Test that the weeks property is working properly.
rd = relativedelta(years=4, months=2, weeks=8, days=6)
self.assertEqual((rd.weeks, rd.days), (8, 8 * 7 + 6))
rd.weeks = 3
self.assertEqual((rd.weeks, rd.days), (3, 3 * 7 + 6))
def testRelativeDeltaRepr(self):
self.assertEqual(repr(relativedelta(years=1, months=-1, days=15)),
'relativedelta(years=+1, months=-1, days=+15)')
self.assertEqual(repr(relativedelta(months=14, seconds=-25)),
'relativedelta(years=+1, months=+2, seconds=-25)')
self.assertEqual(repr(relativedelta(month=3, hour=3, weekday=SU(3))),
'relativedelta(month=3, weekday=SU(+3), hour=3)')
def testRelativeDeltaFractionalYear(self):
with self.assertRaises(ValueError):
relativedelta(years=1.5)
def testRelativeDeltaFractionalMonth(self):
with self.assertRaises(ValueError):
relativedelta(months=1.5)
def testRelativeDeltaInvalidDatetimeObject(self):
with self.assertRaises(TypeError):
relativedelta(dt1='2018-01-01', dt2='2018-01-02')
with self.assertRaises(TypeError):
relativedelta(dt1=datetime(2018, 1, 1), dt2='2018-01-02')
with self.assertRaises(TypeError):
relativedelta(dt1='2018-01-01', dt2=datetime(2018, 1, 2))
def testRelativeDeltaFractionalAbsolutes(self):
# Fractional absolute values will soon be unsupported,
# check for the deprecation warning.
with pytest.warns(DeprecationWarning):
relativedelta(year=2.86)
with pytest.warns(DeprecationWarning):
relativedelta(month=1.29)
with pytest.warns(DeprecationWarning):
relativedelta(day=0.44)
with pytest.warns(DeprecationWarning):
relativedelta(hour=23.98)
with pytest.warns(DeprecationWarning):
relativedelta(minute=45.21)
with pytest.warns(DeprecationWarning):
relativedelta(second=13.2)
with pytest.warns(DeprecationWarning):
relativedelta(microsecond=157221.93)
def testRelativeDeltaFractionalRepr(self):
rd = relativedelta(years=3, months=-2, days=1.25)
self.assertEqual(repr(rd),
'relativedelta(years=+3, months=-2, days=+1.25)')
rd = relativedelta(hours=0.5, seconds=9.22)
self.assertEqual(repr(rd),
'relativedelta(hours=+0.5, seconds=+9.22)')
def testRelativeDeltaFractionalWeeks(self):
# Equivalent to days=8, hours=18
rd = relativedelta(weeks=1.25)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd,
datetime(2009, 9, 11, 18))
def testRelativeDeltaFractionalDays(self):
rd1 = relativedelta(days=1.48)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd1,
datetime(2009, 9, 4, 11, 31, 12))
rd2 = relativedelta(days=1.5)
self.assertEqual(d1 + rd2,
datetime(2009, 9, 4, 12, 0, 0))
def testRelativeDeltaFractionalHours(self):
rd = relativedelta(days=1, hours=12.5)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd,
datetime(2009, 9, 4, 12, 30, 0))
def testRelativeDeltaFractionalMinutes(self):
rd = relativedelta(hours=1, minutes=30.5)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd,
datetime(2009, 9, 3, 1, 30, 30))
def testRelativeDeltaFractionalSeconds(self):
rd = relativedelta(hours=5, minutes=30, seconds=30.5)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd,
datetime(2009, 9, 3, 5, 30, 30, 500000))
def testRelativeDeltaFractionalPositiveOverflow(self):
# Equivalent to (days=1, hours=14)
rd1 = relativedelta(days=1.5, hours=2)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd1,
datetime(2009, 9, 4, 14, 0, 0))
# Equivalent to (days=1, hours=14, minutes=45)
rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
d1 = datetime(2009, 9, 3, 0, 0)
self.assertEqual(d1 + rd2,
datetime(2009, 9, 4, 14, 45))
# Carry back up - equivalent to (days=2, hours=2, minutes=0, seconds=1)
rd3 = relativedelta(days=1.5, hours=13, minutes=59.5, seconds=31)
self.assertEqual(d1 + rd3,
datetime(2009, 9, 5, 2, 0, 1))
def testRelativeDeltaFractionalNegativeDays(self):
# Equivalent to (days=-1, hours=-1)
rd1 = relativedelta(days=-1.5, hours=11)
d1 = datetime(2009, 9, 3, 12, 0)
self.assertEqual(d1 + rd1,
datetime(2009, 9, 2, 11, 0, 0))
# Equivalent to (days=-1, hours=-9)
rd2 = relativedelta(days=-1.25, hours=-3)
self.assertEqual(d1 + rd2,
datetime(2009, 9, 2, 3))
def testRelativeDeltaNormalizeFractionalDays(self):
# Equivalent to (days=2, hours=18)
rd1 = relativedelta(days=2.75)
self.assertEqual(rd1.normalized(), relativedelta(days=2, hours=18))
# Equivalent to (days=1, hours=11, minutes=31, seconds=12)
rd2 = relativedelta(days=1.48)
self.assertEqual(rd2.normalized(),
relativedelta(days=1, hours=11, minutes=31, seconds=12))
def testRelativeDeltaNormalizeFractionalDays2(self):
# Equivalent to (hours=1, minutes=30)
rd1 = relativedelta(hours=1.5)
self.assertEqual(rd1.normalized(), relativedelta(hours=1, minutes=30))
# Equivalent to (hours=3, minutes=17, seconds=5, microseconds=100)
rd2 = relativedelta(hours=3.28472225)
self.assertEqual(rd2.normalized(),
relativedelta(hours=3, minutes=17, seconds=5, microseconds=100))
def testRelativeDeltaNormalizeFractionalMinutes(self):
# Equivalent to (minutes=15, seconds=36)
rd1 = relativedelta(minutes=15.6)
self.assertEqual(rd1.normalized(),
relativedelta(minutes=15, seconds=36))
# Equivalent to (minutes=25, seconds=20, microseconds=25000)
rd2 = relativedelta(minutes=25.33375)
self.assertEqual(rd2.normalized(),
relativedelta(minutes=25, seconds=20, microseconds=25000))
def testRelativeDeltaNormalizeFractionalSeconds(self):
# Equivalent to (seconds=45, microseconds=25000)
rd1 = relativedelta(seconds=45.025)
self.assertEqual(rd1.normalized(),
relativedelta(seconds=45, microseconds=25000))
def testRelativeDeltaFractionalPositiveOverflow2(self):
# Equivalent to (days=1, hours=14)
rd1 = relativedelta(days=1.5, hours=2)
self.assertEqual(rd1.normalized(),
relativedelta(days=1, hours=14))
# Equivalent to (days=1, hours=14, minutes=45)
rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
self.assertEqual(rd2.normalized(),
relativedelta(days=1, hours=14, minutes=45))
# Carry back up - equivalent to:
# (days=2, hours=2, minutes=0, seconds=2, microseconds=3)
rd3 = relativedelta(days=1.5, hours=13, minutes=59.50045,
seconds=31.473, microseconds=500003)
self.assertEqual(rd3.normalized(),
relativedelta(days=2, hours=2, minutes=0,
seconds=2, microseconds=3))
def testRelativeDeltaFractionalNegativeOverflow(self):
# Equivalent to (days=-1)
rd1 = relativedelta(days=-0.5, hours=-12)
self.assertEqual(rd1.normalized(),
relativedelta(days=-1))
# Equivalent to (days=-1)
rd2 = relativedelta(days=-1.5, hours=12)
self.assertEqual(rd2.normalized(),
relativedelta(days=-1))
# Equivalent to (days=-1, hours=-14, minutes=-45)
rd3 = relativedelta(days=-1.5, hours=-2.5, minutes=-15)
self.assertEqual(rd3.normalized(),
relativedelta(days=-1, hours=-14, minutes=-45))
# Equivalent to (days=-1, hours=-14, minutes=+15)
rd4 = relativedelta(days=-1.5, hours=-2.5, minutes=45)
self.assertEqual(rd4.normalized(),
relativedelta(days=-1, hours=-14, minutes=+15))
# Carry back up - equivalent to:
# (days=-2, hours=-2, minutes=0, seconds=-2, microseconds=-3)
rd3 = relativedelta(days=-1.5, hours=-13, minutes=-59.50045,
seconds=-31.473, microseconds=-500003)
self.assertEqual(rd3.normalized(),
relativedelta(days=-2, hours=-2, minutes=0,
seconds=-2, microseconds=-3))
def testInvalidYearDay(self):
with self.assertRaises(ValueError):
relativedelta(yearday=367)
def testAddTimedeltaToUnpopulatedRelativedelta(self):
td = timedelta(
days=1,
seconds=1,
microseconds=1,
milliseconds=1,
minutes=1,
hours=1,
weeks=1
)
expected = relativedelta(
weeks=1,
days=1,
hours=1,
minutes=1,
seconds=1,
microseconds=1001
)
self.assertEqual(expected, relativedelta() + td)
def testAddTimedeltaToPopulatedRelativeDelta(self):
td = timedelta(
days=1,
seconds=1,
microseconds=1,
milliseconds=1,
minutes=1,
hours=1,
weeks=1
)
rd = relativedelta(
year=1,
month=1,
day=1,
hour=1,
minute=1,
second=1,
microsecond=1,
years=1,
months=1,
days=1,
weeks=1,
hours=1,
minutes=1,
seconds=1,
microseconds=1
)
expected = relativedelta(
year=1,
month=1,
day=1,
hour=1,
minute=1,
second=1,
microsecond=1,
years=1,
months=1,
weeks=2,
days=2,
hours=2,
minutes=2,
seconds=2,
microseconds=1002,
)
self.assertEqual(expected, rd + td)
def testHashable(self):
try:
{relativedelta(minute=1): 'test'}
except:
self.fail("relativedelta() failed to hash!")
def testDayOfMonthPlus(self):
assert [
date(2021, 1, 28) + relativedelta(months=1),
date(2021, 2, 27) + relativedelta(months=1),
date(2021, 4, 29) + relativedelta(months=1),
date(2021, 5, 30) + relativedelta(months=1),
] == [
date(2021, 2, 28),
date(2021, 3, 27),
date(2021, 5, 29),
date(2021, 6, 30),
]
def testLastDayOfMonthPlus(self):
assert [
date(2021, 1, 31) + relativedelta(months=1),
date(2021, 1, 30) + relativedelta(months=1),
date(2021, 1, 29) + relativedelta(months=1),
date(2021, 1, 28) + relativedelta(months=1),
date(2021, 2, 28) + relativedelta(months=1),
date(2021, 4, 30) + relativedelta(months=1),
date(2021, 5, 31) + relativedelta(months=1),
] == [
date(2021, 2, 28),
date(2021, 2, 28),
date(2021, 2, 28),
date(2021, 2, 28),
date(2021, 3, 28),
date(2021, 5, 30),
date(2021, 6, 30),
]
def testDayOfMonthMinus(self):
assert [
date(2021, 2, 27) - relativedelta(months=1),
date(2021, 3, 30) - relativedelta(months=1),
date(2021, 3, 29) - relativedelta(months=1),
date(2021, 3, 28) - relativedelta(months=1),
date(2021, 5, 30) - relativedelta(months=1),
date(2021, 6, 29) - relativedelta(months=1),
] == [
date(2021, 1, 27),
date(2021, 2, 28),
date(2021, 2, 28),
date(2021, 2, 28),
date(2021, 4, 30),
date(2021, 5, 29),
]
def testLastDayOfMonthMinus(self):
assert [
date(2021, 2, 28) - relativedelta(months=1),
date(2021, 3, 31) - relativedelta(months=1),
date(2021, 5, 31) - relativedelta(months=1),
date(2021, 6, 30) - relativedelta(months=1),
] == [
date(2021, 1, 28),
date(2021, 2, 28),
date(2021, 4, 30),
date(2021, 5, 30),
]
| RelativeDeltaTest |
python | ray-project__ray | python/ray/serve/tests/test_http_headers.py | {
"start": 1133,
"end": 5823
} | class ____:
def verify_result(self):
for header_attr in ["X-Request-ID"]:
resp = httpx.get(
f"{get_application_url()}", headers={header_attr: "123-234"}
)
assert resp.status_code == 200
assert resp.json() == 1
assert resp.headers[header_attr] == "123-234"
def test_basic(self, serve_instance):
@serve.deployment
class Model:
def __call__(self) -> int:
request_id = ray.serve.context._get_serve_request_context().request_id
assert request_id == "123-234"
return 1
serve.run(Model.bind())
self.verify_result()
def test_fastapi(self, serve_instance):
app = FastAPI()
@serve.deployment
@serve.ingress(app)
class Model:
@app.get("/")
def say_hi(self) -> int:
request_id = ray.serve.context._get_serve_request_context().request_id
assert request_id == "123-234"
return 1
serve.run(Model.bind())
self.verify_result()
def test_starlette_resp(self, serve_instance):
@serve.deployment
class Model:
def __call__(self) -> int:
request_id = ray.serve.context._get_serve_request_context().request_id
assert request_id == "123-234"
return starlette.responses.Response("1", media_type="application/json")
serve.run(Model.bind())
self.verify_result()
def test_set_request_id_headers_with_two_attributes(serve_instance):
"""Test that request id is set with X-Request-ID and RAY_SERVE_REQUEST_ID.
x-request-id has higher priority.
"""
@serve.deployment
class Model:
def __call__(self):
request_id = ray.serve.context._get_serve_request_context().request_id
return request_id
serve.run(Model.bind())
resp = httpx.get(
get_application_url(),
headers={
"X-Request-ID": "234",
},
)
assert resp.status_code == 200
assert SERVE_HTTP_REQUEST_ID_HEADER in resp.headers
assert resp.text == resp.headers[SERVE_HTTP_REQUEST_ID_HEADER]
def test_reuse_request_id(serve_instance):
"""Test client re-uses request id.
When multiple requests are submitted with the same request id at around the same
time, the proxy should continue to track the correct request objects, setting
the correct request id in the serve context, and return the original x-request-id
request header as the response header.
For more details, see https://github.com/ray-project/ray/issues/45723.
"""
app = FastAPI()
@serve.deployment(num_replicas=3)
@serve.ingress(app)
class MyFastAPIDeployment:
@app.post("/hello")
def root(self, user_input: Dict[str, str]) -> Dict[str, str]:
request_id = ray.serve.context._get_serve_request_context().request_id
return {
"app_name": user_input["app_name"],
"serve_context_request_id": request_id,
}
serve.run(MyFastAPIDeployment.bind())
async def send_request(
session: ClientSession, body: Dict[str, Any], request_id: Optional[str]
) -> Tuple[str, str]:
headers = {SERVE_HTTP_REQUEST_ID_HEADER: request_id}
url = "http://localhost:8000/hello"
async with session.post(url=url, headers=headers, json=body) as response:
result = await response.json()
# Ensure the request object is tracked correctly.
assert result["app_name"] == body["app_name"]
# Ensure the request id from the serve context is set correctly.
assert result["serve_context_request_id"] == request_id
# Ensure the request id from the response header is returned correctly.
assert response.headers[SERVE_HTTP_REQUEST_ID_HEADER] == request_id
async def main():
"""Sending 20 requests in parallel all with the same request id, but with
different request body.
"""
bodies = [{"app_name": f"an_{generate_request_id()}"} for _ in range(20)]
connector = TCPConnector(ssl=False)
async with aiohttp.ClientSession(connector=connector) as session:
request_id = f"rid_{generate_request_id()}"
tasks = [
send_request(session, body, request_id=request_id) for body in bodies
]
await asyncio.gather(*tasks)
asyncio.run(main())
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestUserProvidedRequestIDHeader |
python | Netflix__metaflow | test/core/tests/detect_segfault.py | {
"start": 67,
"end": 1284
} | class ____(MetaflowTest):
"""
Test that segmentation faults produce a message in the logs
"""
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
SHOULD_FAIL = True
@steps(0, ["singleton-end"], required=True)
def step_end(self):
# cause a segfault
import ctypes
print("Crash and burn!")
ctypes.string_at(0)
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
# CLI logs requires the exact task ID for failed tasks which
# we don't have here. Let's rely on the Metadata checker only.
run = checker.get_run()
if run:
# loglines prior to the segfault should be persisted
checker.assert_log("end", "stdout", "Crash and burn!", exact_match=False)
# a message should be printed that mentions "segmentation fault"
checker.assert_log("end", "stderr", "segmentation fault", exact_match=False)
| DetectSegFaultTest |
python | huggingface__transformers | tests/models/patchtst/test_modeling_patchtst.py | {
"start": 5104,
"end": 11510
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
PatchTSTModel,
PatchTSTForPrediction,
PatchTSTForPretraining,
PatchTSTForClassification,
PatchTSTForRegression,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": PatchTSTModel} if is_torch_available() else {}
is_encoder_decoder = False
test_missing_keys = True
test_inputs_embeds = False
test_resize_embeddings = True
test_resize_position_embeddings = False
test_mismatched_shapes = True
has_attentions = True
def setUp(self):
self.model_tester = PatchTSTModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=PatchTSTConfig,
has_text_modality=False,
prediction_length=self.model_tester.prediction_length,
)
def test_config(self):
self.config_tester.run_common_tests()
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
# if PatchTSTForPretraining
if model_class == PatchTSTForPretraining:
inputs_dict.pop("future_values")
# else if classification model:
elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING):
rng = random.Random(self.model_tester.seed)
labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng)
inputs_dict["target_values"] = labels
inputs_dict.pop("future_values")
elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING):
rng = random.Random(self.model_tester.seed)
target_values = floats_tensor([self.model_tester.batch_size, self.model_tester.num_targets], rng=rng)
inputs_dict["target_values"] = target_values
inputs_dict.pop("future_values")
return inputs_dict
def test_save_load_strict(self):
config, _ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers
)
self.assertEqual(len(hidden_states), expected_num_layers)
num_patch = self.model_tester.num_patches
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[num_patch, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="we have no tokens embeddings")
def test_resize_tokens_embeddings(self):
pass
def test_model_main_input_name(self):
model_signature = inspect.signature(getattr(PatchTSTModel, "forward"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(PatchTSTModel.main_input_name, observed_main_input_name)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model_class == PatchTSTForPretraining:
expected_arg_names = [
"past_values",
"past_observed_mask",
]
elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values(
MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING
):
expected_arg_names = ["past_values", "target_values", "past_observed_mask"]
else:
expected_arg_names = [
"past_values",
"past_observed_mask",
"future_values",
]
expected_arg_names.extend(
[
"output_hidden_states",
"output_attentions",
"return_dict",
]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@is_flaky()
def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()
@unittest.skip(reason="Model does not have input embeddings")
def test_model_get_set_embeddings(self):
pass
def prepare_batch(repo_id="hf-internal-testing/etth1-hourly-batch", file="train-batch.pt"):
file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset")
check_torch_load_is_safe()
batch = torch.load(file, map_location=torch_device, weights_only=True)
return batch
# Note: Pretrained model is not yet downloadable.
@require_torch
@slow
| PatchTSTModelTest |
python | apache__thrift | lib/py/src/server/THttpServer.py | {
"start": 951,
"end": 1565
} | class ____(Exception):
"""Allows handlers to override the HTTP response
Normally, THttpServer always sends a 200 response. If a handler wants
to override this behavior (e.g., to simulate a misconfigured or
overloaded web server during testing), it can raise a ResponseException.
The function passed to the constructor will be called with the
RequestHandler as its only argument. Note that this is irrelevant
for ONEWAY requests, as the HTTP response must be sent before the
RPC is processed.
"""
def __init__(self, handler):
self.handler = handler
| ResponseException |
python | langchain-ai__langchain | libs/core/langchain_core/tracers/evaluation.py | {
"start": 1017,
"end": 8367
} | class ____(BaseTracer):
"""Tracer that runs a run evaluator whenever a run is persisted.
Attributes:
client : Client
The LangSmith client instance used for evaluating the runs.
"""
name: str = "evaluator_callback_handler"
example_id: UUID | None = None
"""The example ID associated with the runs."""
client: langsmith.Client
"""The LangSmith client instance used for evaluating the runs."""
evaluators: Sequence[langsmith.RunEvaluator] = ()
"""The sequence of run evaluators to be executed."""
executor: ThreadPoolExecutor | None = None
"""The thread pool executor used for running the evaluators."""
futures: weakref.WeakSet[Future] = weakref.WeakSet()
"""The set of futures representing the running evaluators."""
skip_unfinished: bool = True
"""Whether to skip runs that are not finished or raised an error."""
project_name: str | None = None
"""The LangSmith project name to be organize eval chain runs under."""
logged_eval_results: dict[tuple[str, str], list[EvaluationResult]]
lock: threading.Lock
def __init__(
self,
evaluators: Sequence[langsmith.RunEvaluator],
client: langsmith.Client | None = None,
example_id: UUID | str | None = None,
skip_unfinished: bool = True, # noqa: FBT001,FBT002
project_name: str | None = "evaluators",
max_concurrency: int | None = None,
**kwargs: Any,
) -> None:
"""Create an EvaluatorCallbackHandler.
Args:
evaluators : Sequence[RunEvaluator]
The run evaluators to apply to all top level runs.
client : LangSmith Client, optional
The LangSmith client instance to use for evaluating the runs.
If not specified, a new instance will be created.
example_id : Union[UUID, str], optional
The example ID to be associated with the runs.
skip_unfinished: bool, optional
Whether to skip unfinished runs.
project_name : str, optional
The LangSmith project name to be organize eval chain runs under.
max_concurrency : int, optional
The maximum number of concurrent evaluators to run.
"""
super().__init__(**kwargs)
self.example_id = (
UUID(example_id) if isinstance(example_id, str) else example_id
)
self.client = client or langchain_tracer.get_client()
self.evaluators = evaluators
if max_concurrency is None:
self.executor = _get_executor()
elif max_concurrency > 0:
self.executor = ThreadPoolExecutor(max_workers=max_concurrency)
weakref.finalize(
self,
lambda: cast("ThreadPoolExecutor", self.executor).shutdown(wait=True),
)
else:
self.executor = None
self.futures = weakref.WeakSet()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
self.logged_eval_results = {}
self.lock = threading.Lock()
_TRACERS.add(self)
def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None:
"""Evaluate the run in the project.
Args:
run: The run to be evaluated.
evaluator: The evaluator to use for evaluating the run.
"""
try:
if self.project_name is None:
eval_result = self.client.evaluate_run(run, evaluator)
eval_results = [eval_result]
with tracing_v2_enabled(
project_name=self.project_name, tags=["eval"], client=self.client
) as cb:
reference_example = (
self.client.read_example(run.reference_example_id)
if run.reference_example_id
else None
)
evaluation_result = evaluator.evaluate_run(
# This is subclass, but getting errors for some reason
run, # type: ignore[arg-type]
example=reference_example,
)
eval_results = self._log_evaluation_feedback(
evaluation_result,
run,
source_run_id=cb.latest_run.id if cb.latest_run else None,
)
except Exception:
logger.exception(
"Error evaluating run %s with %s",
run.id,
evaluator.__class__.__name__,
)
raise
example_id = str(run.reference_example_id)
with self.lock:
for res in eval_results:
run_id = str(getattr(res, "target_run_id", run.id))
self.logged_eval_results.setdefault((run_id, example_id), []).append(
res
)
def _select_eval_results(
self,
results: EvaluationResult | EvaluationResults,
) -> list[EvaluationResult]:
if isinstance(results, EvaluationResult):
results_ = [results]
elif isinstance(results, dict) and "results" in results:
results_ = results["results"]
else:
msg = (
f"Invalid evaluation result type {type(results)}."
" Expected EvaluationResult or EvaluationResults."
)
raise TypeError(msg)
return results_
def _log_evaluation_feedback(
self,
evaluator_response: EvaluationResult | EvaluationResults,
run: Run,
source_run_id: UUID | None = None,
) -> list[EvaluationResult]:
results = self._select_eval_results(evaluator_response)
for res in results:
source_info_: dict[str, Any] = {}
if res.evaluator_info:
source_info_ = {**res.evaluator_info, **source_info_}
run_id_ = getattr(res, "target_run_id", None)
if run_id_ is None:
run_id_ = run.id
self.client.create_feedback(
run_id_,
res.key,
score=res.score,
value=res.value,
comment=res.comment,
correction=res.correction,
source_info=source_info_,
source_run_id=res.source_run_id or source_run_id,
feedback_source_type=langsmith.schemas.FeedbackSourceType.MODEL,
)
return results
def _persist_run(self, run: Run) -> None:
"""Run the evaluator on the run.
Args:
run: The run to be evaluated.
"""
if self.skip_unfinished and not run.outputs:
logger.debug("Skipping unfinished run %s", run.id)
return
run_ = run.copy()
run_.reference_example_id = self.example_id
for evaluator in self.evaluators:
if self.executor is None:
self._evaluate_in_project(run_, evaluator)
else:
self.futures.add(
self.executor.submit(self._evaluate_in_project, run_, evaluator)
)
def wait_for_futures(self) -> None:
"""Wait for all futures to complete."""
wait(self.futures)
| EvaluatorCallbackHandler |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/dependency.py | {
"start": 31936,
"end": 36396
} | class ____(_DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
Theoretically, this approach could be expanded to support transparent
deletion of objects referenced via many-to-one as well, although
the current attribute system doesn't do enough bookkeeping for this
to be efficient.
"""
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
if self.passive_updates:
return
else:
if False in (
prop.passive_updates
for prop in self.prop._reverse_property
):
return
uow.register_preprocessor(self, False)
def per_property_flush_actions(self, uow):
parent_saves = unitofwork._SaveUpdateAll(uow, self.parent.base_mapper)
after_save = unitofwork._ProcessAll(uow, self, False, False)
uow.dependencies.update([(parent_saves, after_save)])
def per_state_flush_actions(self, uow, states, isdelete):
pass
def presort_deletes(self, uowcommit, states):
pass
def presort_saves(self, uow, states):
if not self.passive_updates:
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
self._process_key_switches(states, uow)
def prop_has_changes(self, uow, states, isdelete):
if not isdelete and self.passive_updates:
d = self._key_switchers(uow, states)
return bool(d)
return False
def process_deletes(self, uowcommit, states):
assert False
def process_saves(self, uowcommit, states):
# for passive updates, register objects in the process stage
# so that we avoid ManyToOneDP's registering the object without
# the listonly flag in its own preprocess stage (results in UPDATE)
# statements being emitted
assert self.passive_updates
self._process_key_switches(states, uowcommit)
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
("pk_switchers", self), lambda: (set(), set())
)
allstates = switched.union(notswitched)
for s in states:
if s not in allstates:
if self._pks_changed(uow, s):
switched.add(s)
else:
notswitched.add(s)
return switched
def _process_key_switches(self, deplist, uowcommit):
switchers = self._key_switchers(uowcommit, deplist)
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
related = state.get_impl(self.key).get(
state, dict_, passive=self._passive_update_flag
)
if (
related is not attributes.PASSIVE_NO_RESULT
and related is not None
):
if self.prop.uselist:
if not related:
continue
related_obj = related[0]
else:
related_obj = related
related_state = attributes.instance_state(related_obj)
if related_state in switchers:
uowcommit.register_object(
state, False, self.passive_updates
)
sync._populate(
related_state,
self.mapper,
state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
self.passive_updates,
)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync._source_modified(
uowcommit, state, self.mapper, self.prop.synchronize_pairs
)
| _DetectKeySwitch |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 12478,
"end": 14084
} | class ____:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1, 2], "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.attr == "data"
assert e.patches == [1, 2]
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.ColumnsPatchedEvent.kind == "ColumnsPatched"
def test_to_serializable(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1, 2], "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, model=m.ref, attr="data", patches=[1,2])
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1, 2], "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_columns_patched']
def test_combine_ignores_all(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1,2], "setter", "invoker")
e2 = bde.ColumnsPatchedEvent(doc, m, "data", [3,4], "setter", "invoker")
assert e.combine(e2) is False
assert e.patches == [1,2]
# TitleChangedEvent -----------------------------------------------------------
| TestColumnsPatchedEvent |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_michigan_zip.py | {
"start": 1751,
"end": 4094
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Michigan zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_michigan_zip": ["48209", "48417", "48701", "49066"],
"invalid_michigan_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_michigan_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_michigan_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_michigan_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMichiganZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidMichiganZip |
python | pypa__setuptools | setuptools/command/develop.py | {
"start": 160,
"end": 1411
} | class ____(Command):
"""Set up package for development"""
user_options = [
("install-dir=", "d", "install package to DIR"),
('no-deps', 'N', "don't install dependencies"),
('user', None, f"install in user site-package '{site.USER_SITE}'"),
('prefix=', None, "installation prefix"),
("index-url=", "i", "base URL of Python Package Index"),
]
boolean_options = [
'no-deps',
'user',
]
install_dir = None
no_deps = False
user = False
prefix = None
index_url = None
def run(self) -> None:
# Casting because mypy doesn't understand bool mult conditionals
cmd = cast(
list[str],
[sys.executable, '-m', 'pip', 'install', '-e', '.', '--use-pep517']
+ ['--target', self.install_dir] * bool(self.install_dir)
+ ['--no-deps'] * self.no_deps
+ ['--user'] * self.user
+ ['--prefix', self.prefix] * bool(self.prefix)
+ ['--index-url', self.index_url] * bool(self.index_url),
)
subprocess.check_call(cmd)
def initialize_options(self) -> None:
DevelopDeprecationWarning.emit()
def finalize_options(self) -> None:
pass
| develop |
python | streamlit__streamlit | lib/streamlit/watcher/path_watcher.py | {
"start": 1346,
"end": 5716
} | class ____:
def __init__(
self,
_path_str: str,
_on_changed: Callable[[str], None],
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> None:
pass
# EventBasedPathWatcher will be a stub and have no functional
# implementation if its import failed (due to missing watchdog module),
# so we can't reference it directly in this type.
PathWatcherType: TypeAlias = (
type["streamlit.watcher.event_based_path_watcher.EventBasedPathWatcher"]
| type[PollingPathWatcher]
| type[NoOpPathWatcher]
)
def _is_watchdog_available() -> bool:
"""Check if the watchdog module is installed."""
try:
import watchdog # noqa: F401
return True
except ImportError:
return False
def report_watchdog_availability() -> None:
if (
config.get_option("server.fileWatcherType") not in ["poll", "none"]
and not _is_watchdog_available()
):
msg = "\n $ xcode-select --install" if env_util.IS_DARWIN else ""
cli_util.print_to_cli(
" For better performance, install the Watchdog module:",
fg="blue",
bold=True,
)
cli_util.print_to_cli(
f"""{msg}
$ pip install watchdog
"""
)
def _watch_path(
path: str,
on_path_changed: Callable[[str], None],
watcher_type: str | None = None,
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> bool:
"""Create a PathWatcher for the given path if we have a viable
PathWatcher class.
Parameters
----------
path
Path to watch.
on_path_changed
Function that's called when the path changes.
watcher_type
Optional watcher_type string. If None, it will default to the
'server.fileWatcherType` config option.
glob_pattern
Optional glob pattern to use when watching a directory. If set, only
files matching the pattern will be counted as being created/deleted
within the watched directory.
allow_nonexistent
If True, allow the file or directory at the given path to be
nonexistent.
Returns
-------
bool
True if the path is being watched, or False if we have no
PathWatcher class.
"""
if watcher_type is None:
watcher_type = config.get_option("server.fileWatcherType")
watcher_class = get_path_watcher_class(watcher_type)
if watcher_class is NoOpPathWatcher:
return False
watcher_class(
path,
on_path_changed,
glob_pattern=glob_pattern,
allow_nonexistent=allow_nonexistent,
)
return True
def watch_file(
path: str,
on_file_changed: Callable[[str], None],
watcher_type: str | None = None,
) -> bool:
return _watch_path(path, on_file_changed, watcher_type)
def watch_dir(
path: str,
on_dir_changed: Callable[[str], None],
watcher_type: str | None = None,
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> bool:
# Add a trailing slash to the path to ensure
# that its interpreted as a directory.
path = os.path.join(path, "")
return _watch_path(
path,
on_dir_changed,
watcher_type,
glob_pattern=glob_pattern,
allow_nonexistent=allow_nonexistent,
)
def get_default_path_watcher_class() -> PathWatcherType:
"""Return the class to use for path changes notifications, based on the
server.fileWatcherType config option.
"""
return get_path_watcher_class(config.get_option("server.fileWatcherType"))
def get_path_watcher_class(watcher_type: str) -> PathWatcherType:
"""Return the PathWatcher class that corresponds to the given watcher_type
string. Acceptable values are 'auto', 'watchdog', 'poll' and 'none'.
"""
if watcher_type in {"watchdog", "auto"} and _is_watchdog_available():
# Lazy-import this module to prevent unnecessary imports of the watchdog package.
from streamlit.watcher.event_based_path_watcher import EventBasedPathWatcher
return EventBasedPathWatcher
if watcher_type in {"auto", "poll"}:
return PollingPathWatcher
return NoOpPathWatcher
| NoOpPathWatcher |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 7122,
"end": 7995
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_stored_info_type(self, mock_hook):
mock_hook.return_value.create_stored_info_type.return_value = StoredInfoType(name=DLP_JOB_PATH)
operator = CloudDLPCreateStoredInfoTypeOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_stored_info_type.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
config=None,
stored_info_type_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPCreateStoredInfoTypeOperator |
python | bokeh__bokeh | src/bokeh/core/validation/issue.py | {
"start": 1420,
"end": 1507
} | class ____:
code: int
name: str
description: str
@dataclass(frozen=True)
| Issue |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 6763,
"end": 7063
} | class ____:
params = [[10**3, 10**6], ["fast", "slow"], ["bool", "boolean"]]
param_names = ["N", "case", "dtype"]
def setup(self, N, case, dtype):
val = case != "fast"
self.s = Series([val] * N, dtype=dtype)
def time_all(self, N, case, dtype):
self.s.all()
| All |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 63976,
"end": 64629
} | class ____(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
@deprecated_renamed_argument("use_min_max_bounds", None, "7.0")
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("trf", calc_uncertainties, use_min_max_bounds)
| TRFLSQFitter |
python | getsentry__sentry | src/sentry/migrations/0981_add_dashboard_migration_fields.py | {
"start": 155,
"end": 1617
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0980_integrations_json_field"),
]
operations = [
migrations.AddField(
model_name="dashboardwidget",
name="changed_reason",
field=models.JSONField(null=True),
),
migrations.AddField(
model_name="dashboardwidget",
name="widget_snapshot",
field=models.JSONField(null=True),
),
]
| Migration |
python | neetcode-gh__leetcode | python/0297-serialize-and-deserialize-binary-tree.py | {
"start": 172,
"end": 797
} | class ____:
def serialize(self, root):
res = []
def dfs(node):
if not node:
res.append("N")
return
res.append(str(node.val))
dfs(node.left)
dfs(node.right)
dfs(root)
return ",".join(res)
def deserialize(self, data):
vals = data.split(",")
def dfs():
val = vals.pop(0)
if val == "N":
return None
node = TreeNode(val=int(val))
node.left = dfs()
node.right = dfs()
return node
return dfs()
| Codec |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/renderer.py | {
"start": 10231,
"end": 10341
} | class ____(Exception):
"Information unavailable. Did not yet receive the CPR response."
| HeightIsUnknownError |
python | kamyu104__LeetCode-Solutions | Python/campus-bikes.py | {
"start": 67,
"end": 1114
} | class ____(object):
def assignBikes(self, workers, bikes):
"""
:type workers: List[List[int]]
:type bikes: List[List[int]]
:rtype: List[int]
"""
def manhattan(p1, p2):
return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
distances = [[] for _ in xrange(len(workers))]
for i in xrange(len(workers)):
for j in xrange(len(bikes)):
distances[i].append((manhattan(workers[i], bikes[j]), i, j))
distances[i].sort(reverse = True)
result = [None] * len(workers)
lookup = set()
min_heap = []
for i in xrange(len(workers)):
heapq.heappush(min_heap, distances[i].pop())
while len(lookup) < len(workers):
_, worker, bike = heapq.heappop(min_heap)
if bike not in lookup:
result[worker] = bike
lookup.add(bike)
else:
heapq.heappush(min_heap, distances[worker].pop())
return result
| Solution |
python | huggingface__transformers | src/transformers/models/camembert/modeling_camembert.py | {
"start": 10655,
"end": 12061
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = CamembertCrossAttention if is_cross_attention else CamembertSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = CamembertSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
| CamembertAttention |
python | pypa__pip | src/pip/_internal/utils/misc.py | {
"start": 15844,
"end": 19310
} | class ____:
secret: str
redacted: str
def __repr__(self) -> str:
return f"<HiddenText {str(self)!r}>"
def __str__(self) -> str:
return self.redacted
def __eq__(self, other: object) -> bool:
# Equality is particularly useful for testing.
if type(self) is type(other):
# The string being used for redaction doesn't also have to match,
# just the raw, original string.
return self.secret == cast(HiddenText, other).secret
return NotImplemented
# Disable hashing, since we have a custom __eq__ and don't need hash-ability
# (yet). The only required property of hashing is that objects which compare
# equal have the same hash value.
__hash__ = None # type: ignore[assignment]
def hide_value(value: str) -> HiddenText:
return HiddenText(value, redacted="****")
def hide_url(url: str) -> HiddenText:
redacted = redact_auth_from_url(url)
return HiddenText(url, redacted=redacted)
def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
"""Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
"""
pip_names = [
"pip",
f"pip{sys.version_info.major}",
f"pip{sys.version_info.major}.{sys.version_info.minor}",
]
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
)
if should_show_use_python_msg:
new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
raise CommandError(
"To modify pip, please run the following command:\n{}".format(
" ".join(new_command)
)
)
def check_externally_managed() -> None:
"""Check whether the current environment is externally managed.
If the ``EXTERNALLY-MANAGED`` config file is found, the current environment
is considered externally managed, and an ExternallyManagedEnvironment is
raised.
"""
if running_under_virtualenv():
return
marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
if not os.path.isfile(marker):
return
raise ExternallyManagedEnvironment.from_config(marker)
def is_console_interactive() -> bool:
"""Is this console interactive?"""
return sys.stdin is not None and sys.stdin.isatty()
def hash_file(path: str, blocksize: int = 1 << 20) -> tuple[Any, int]:
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, "rb") as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
return h, length
def pairwise(iterable: Iterable[Any]) -> Iterator[tuple[Any, Any]]:
"""
Return paired elements.
For example:
s -> (s0, s1), (s2, s3), (s4, s5), ...
"""
iterable = iter(iterable)
return zip_longest(iterable, iterable)
def partition(
pred: Callable[[T], bool], iterable: Iterable[T]
) -> tuple[Iterable[T], Iterable[T]]:
"""
Use a predicate to partition entries into false entries and true entries,
like
partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
"""
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
| HiddenText |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 187,
"end": 288
} | class ____:
"""Just a docstring."""
def __init__(self):
pass
| ClassWithTheDocstringAndInit |
python | ray-project__ray | release/nightly_tests/placement_group_tests/pg_run.py | {
"start": 281,
"end": 463
} | class ____(object):
def __init__(self, i):
self.i = i
def work(self):
time.sleep(0.1)
print("work ", self.i)
@ray.remote(num_cpus=1, num_gpus=1)
| Worker |
python | kamyu104__LeetCode-Solutions | Python/find-the-city-with-the-smallest-number-of-neighbors-at-a-threshold-distance.py | {
"start": 33,
"end": 734
} | class ____(object):
def findTheCity(self, n, edges, distanceThreshold):
"""
:type n: int
:type edges: List[List[int]]
:type distanceThreshold: int
:rtype: int
"""
dist = [[float("inf")]*n for _ in xrange(n)]
for i, j, w in edges:
dist[i][j] = dist[j][i] = w
for i in xrange(n):
dist[i][i] = 0
for k in xrange(n):
for i in xrange(n):
for j in xrange(n):
dist[i][j] = min(dist[i][j], dist[i][k]+dist[k][j])
result = {sum(d <= distanceThreshold for d in dist[i]): i for i in xrange(n)}
return result[min(result.iterkeys())]
| Solution |
python | doocs__leetcode | solution/0700-0799/0743.Network Delay Time/Solution.py | {
"start": 0,
"end": 623
} | class ____:
def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
g = [[inf] * n for _ in range(n)]
for u, v, w in times:
g[u - 1][v - 1] = w
dist = [inf] * n
dist[k - 1] = 0
vis = [False] * n
for _ in range(n):
t = -1
for j in range(n):
if not vis[j] and (t == -1 or dist[t] > dist[j]):
t = j
vis[t] = True
for j in range(n):
dist[j] = min(dist[j], dist[t] + g[t][j])
ans = max(dist)
return -1 if ans == inf else ans
| Solution |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/dataframeeditor.py | {
"start": 3536,
"end": 4451
} | class ____:
Close = 'close'
ConvertToBool = 'convert_to_bool_action'
ConvertToComplex = 'convert_to_complex_action'
ConvertToFloat = 'convert_to_float_action'
ConvertToInt = 'convert_to_int_action'
ConvertToStr = 'convert_to_str_action'
Copy = 'copy_action'
DuplicateColumn = 'duplicate_column_action'
DuplicateRow = 'duplicate_row_action'
Edit = 'edit_action'
EditHeader = 'edit_header_action'
EditIndex = 'edit_index_action'
Histogram = 'histogram'
InsertAbove = 'insert_above_action'
InsertAfter = 'insert_after_action'
InsertBefore = 'insert_before_action'
InsertBelow = 'insert_below_action'
Preferences = 'preferences_action'
Refresh = 'refresh_action'
RemoveColumn = 'remove_column_action'
RemoveRow = 'remove_row_action'
ResizeColumns = 'resize_columns_action'
ResizeRows = 'resize_rows_action'
| DataframeEditorActions |
python | realpython__materials | python-guitar-synthesizer/source_code_final/src/digitar/instrument.py | {
"start": 470,
"end": 798
} | class ____:
strings: tuple[VibratingString, ...]
@classmethod
def from_notes(cls, *notes: str) -> Self:
return cls(
tuple(
VibratingString(Pitch.from_scientific_notation(note))
for note in reversed(notes)
)
)
@dataclass(frozen=True)
| StringTuning |
python | automl__auto-sklearn | test/test_metalearning/pyMetaLearn/test_metalearning_configuration.py | {
"start": 284,
"end": 1879
} | class ____(unittest.TestCase):
def test_metalearning_cs_size(self):
self.cwd = os.getcwd()
data_dir = os.path.dirname(__file__)
data_dir = os.path.join(data_dir, "test_meta_base_data")
os.chdir(data_dir)
# Total: 176, categorical: 3, numerical: 7, string: 7
total = 179
num_numerical = 6
num_string = 11
num_categorical = 3
for feat_type, cs_size in [
({"A": "numerical"}, total - num_string - num_categorical),
({"A": "categorical"}, total - num_string - num_numerical),
({"A": "string"}, total - num_categorical - num_numerical),
({"A": "numerical", "B": "categorical"}, total - num_string),
({"A": "numerical", "B": "string"}, total - num_categorical),
({"A": "categorical", "B": "string"}, total - num_numerical),
({"A": "categorical", "B": "string", "C": "numerical"}, total),
]:
pipeline = autosklearn.pipeline.classification.SimpleClassificationPipeline(
feat_type=feat_type
)
self.cs = pipeline.get_hyperparameter_search_space(feat_type=feat_type)
self.logger = logging.getLogger()
meta_base = MetaBase(self.cs, data_dir, logger=self.logger)
self.meta_optimizer = metalearner.MetaLearningOptimizer(
"233", self.cs, meta_base, logger=self.logger
)
self.assertEqual(
len(self.meta_optimizer.configuration_space), cs_size, feat_type
)
| MetalearningConfiguration |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 7708,
"end": 7956
} | class ____(AttributeHandler):
minimum_path_length = 1
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
return [str(event.data.get("type"))]
@attribute_registry.register("extra")
| TypeAttributeHandler |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/memory_cleanup_test.py | {
"start": 1441,
"end": 6737
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(MemoryCleanupTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
def assertMemoryNotIncreasing(self, f, num_iters, max_increase_mb):
"""Assert memory usage doesn't increase beyond given threshold for f."""
# Warm up.
f()
# Wait for background threads to start up and allocate memory.
time.sleep(4)
initial = memory_profiler.memory_usage(-1)[0]
for _ in range(num_iters):
f()
increase = memory_profiler.memory_usage(-1)[0] - initial
logging.info("Memory increase observed: %f MB" % increase)
assert increase < max_increase_mb, (
"Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
"Maximum allowed increase: %f") % (initial, increase, max_increase_mb)
def assertNoMemoryLeak(self, dataset_fn):
"""Assert consuming elements from the dataset does not leak memory."""
def run():
get_next = self.getNext(dataset_fn())
for _ in range(100):
self.evaluate(get_next())
for _ in range(10):
run()
gc.collect()
def is_native_object(o):
# First check if `o` is a weakref proxy. Calling
# `isinstance(o, internal.NativeObject)` on an expired weak reference
# proxy will raise a ReferenceError.
if isinstance(o, weakref.ProxyTypes): return False
return isinstance(o, internal.NativeObject)
tensors = [
o for o in gc.get_objects() if is_native_object(o)
]
self.assertEmpty(tensors, "%d Tensors are still alive." % len(tensors))
@combinations.generate(test_base.eager_only_combinations())
def testEagerMemoryUsageWithReset(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
def f():
self.evaluate(multi_device_iterator.get_next())
multi_device_iterator._eager_reset()
self.assertMemoryNotIncreasing(f, num_iters=50, max_increase_mb=250)
@combinations.generate(test_base.eager_only_combinations())
def testEagerMemoryUsageWithRecreation(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
dataset = dataset_ops.Dataset.range(10)
def f():
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.get_next())
del multi_device_iterator
# TODO(b/123316347): Reduce threshold once bug is fixed.
self.assertMemoryNotIncreasing(f, num_iters=50, max_increase_mb=250)
@combinations.generate(test_base.eager_only_combinations())
def testFilter(self):
def get_dataset():
def fn(_):
return True
return dataset_ops.Dataset.range(0, 100).filter(fn)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(combinations.combine(tf_api_version=1, mode="eager"))
def testFilterLegacy(self):
def get_dataset():
def fn(_):
return True
return dataset_ops.Dataset.range(0, 100).filter_with_legacy_function(fn)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(test_base.eager_only_combinations())
def testFlatMap(self):
def get_dataset():
def fn(x):
return dataset_ops.Dataset.from_tensors(x * x)
return dataset_ops.Dataset.range(0, 100).flat_map(fn)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(test_base.eager_only_combinations())
def testFromGenerator(self):
def get_dataset():
def fn():
return range(100)
return dataset_ops.Dataset.from_generator(fn, output_types=dtypes.float32)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(num_parallel_calls=[None, 10])))
def testMap(self, num_parallel_calls):
def get_dataset():
def fn(x):
return x * x
return dataset_ops.Dataset.range(0, 100).map(
fn, num_parallel_calls=num_parallel_calls)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(
combinations.combine(
tf_api_version=1, mode="eager", num_parallel_calls=[None, 10]))
def testMapLegacy(self, num_parallel_calls):
def get_dataset():
def fn(x):
return x * x
return dataset_ops.Dataset.range(0, 100).map_with_legacy_function(
fn, num_parallel_calls=num_parallel_calls)
self.assertNoMemoryLeak(get_dataset)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(num_parallel_calls=[None, 10])))
def testInterleave(self, num_parallel_calls):
def get_dataset():
def fn(x):
return dataset_ops.Dataset.from_tensors(x * x)
return dataset_ops.Dataset.range(0, 100).interleave(
fn, num_parallel_calls=num_parallel_calls, cycle_length=10)
self.assertNoMemoryLeak(get_dataset)
if __name__ == "__main__":
test.main()
| MemoryCleanupTest |
python | davidhalter__parso | parso/python/errors.py | {
"start": 25683,
"end": 26716
} | class ____(SyntaxRule):
message = "from __future__ imports must occur at the beginning of the file"
def is_issue(self, node):
if _is_future_import(node):
if not _is_future_import_first(node):
return True
for from_name, future_name in node.get_paths():
name = future_name.value
allowed_futures = list(ALLOWED_FUTURES)
if self._normalizer.version >= (3, 7):
allowed_futures.append('annotations')
if name == 'braces':
self.add_issue(node, message="not a chance")
elif name == 'barry_as_FLUFL':
m = "Seriously I'm not implementing this :) ~ Dave"
self.add_issue(node, message=m)
elif name not in allowed_futures:
message = "future feature %s is not defined" % name
self.add_issue(node, message=message)
@ErrorFinder.register_rule(type='star_expr')
| _FutureImportRule |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployments.py | {
"start": 41882,
"end": 43518
} | class ____:
async def test_read_deployment(
self,
client,
deployment,
):
response = await client.get(f"/deployments/{deployment.id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == str(deployment.id)
assert response.json()["name"] == deployment.name
assert response.json()["flow_id"] == str(deployment.flow_id)
async def test_read_deployment_returns_404_if_does_not_exist(self, client):
response = await client.get(f"/deployments/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_read_deployment_with_concurrency_limit(
self, session, client, deployment
):
update = DeploymentUpdate(concurrency_limit=4)
await models.deployments.update_deployment(session, deployment.id, update)
await session.commit()
response = await client.get(f"/deployments/{deployment.id}")
assert response.status_code == status.HTTP_200_OK
json_response = response.json()
assert json_response["concurrency_limit"] is None, (
"Deprecated int-only field should be None for backwards-compatibility"
)
global_concurrency_limit = json_response.get("global_concurrency_limit")
assert global_concurrency_limit is not None
assert global_concurrency_limit.get("limit") == update.concurrency_limit
assert global_concurrency_limit.get("active") is True
assert (
global_concurrency_limit.get("name") == f"deployment:{json_response['id']}"
)
| TestReadDeployment |
python | doocs__leetcode | solution/0600-0699/0645.Set Mismatch/Solution2.py | {
"start": 0,
"end": 310
} | class ____:
def findErrorNums(self, nums: List[int]) -> List[int]:
cnt = Counter(nums)
n = len(nums)
ans = [0] * 2
for x in range(1, n + 1):
if cnt[x] == 2:
ans[0] = x
if cnt[x] == 0:
ans[1] = x
return ans
| Solution |
python | pytorch__pytorch | torchgen/api/autograd.py | {
"start": 9554,
"end": 9746
} | class ____:
name: str
type: Type
# TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
cpp_type: str
@dataclass(frozen=True)
| DifferentiableOutput |
python | realpython__materials | python-mappings/pizza_menu.py | {
"start": 45,
"end": 1765
} | class ____(MutableMapping):
def __init__(self, menu: dict):
self._menu = {}
self._first_letters = {}
for key, value in menu.items():
first_letter = key[0].lower()
if first_letter in self._first_letters:
self._raise_duplicate_key_error(key)
self._first_letters[first_letter] = key
self._menu[key] = value
def _raise_duplicate_key_error(self, key):
raise ValueError(
f"'{key}' is invalid. All pizzas must have unique first letters"
)
def __getitem__(self, key):
if key not in self._menu and len(key) > 1:
raise KeyError(key)
key = self._first_letters.get(key[0].lower(), key)
return self._menu[key]
def __setitem__(self, key, value):
first_letter = key[0].lower()
if len(key) == 1:
key = self._first_letters.get(first_letter, key)
if key in self._menu:
self._menu[key] = value
elif first_letter in self._first_letters:
self._raise_duplicate_key_error(key)
else:
self._first_letters[first_letter] = key
self._menu[key] = value
def __delitem__(self, key):
if key not in self._menu and len(key) > 1:
raise KeyError(key)
key = self._first_letters.pop(key[0].lower(), key)
del self._menu[key]
def __iter__(self):
return iter(self._menu)
def __len__(self):
return len(self._menu)
def __repr__(self):
return f"{self.__class__.__name__}({self._menu})"
def __str__(self):
return str(self._menu)
def __contains__(self, key):
return key in self._menu
| PizzaMenu |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 4075,
"end": 4318
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, foo):
return torch.add(foo, foo)
@staticmethod
def backward(ctx, grad_output):
return grad_output * grad_output.stride()[-1]
| CustomFuncStrideBwd |
python | bokeh__bokeh | src/bokeh/core/query.py | {
"start": 6554,
"end": 6941
} | class ____(_Operator):
''' Form disjunctions from other query predicates.
Construct an ``OR`` expression by making a dict with ``OR`` as the key,
and a list of other query expressions as the value:
.. code-block:: python
# matches any Axis subclasses or models with .name == "mycircle"
{ OR: [dict(type=Axis), dict(name="mycircle")] }
'''
pass
| OR |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 9015,
"end": 9890
} | class ____(LTTextLine):
def __init__(self, word_margin):
LTTextLine.__init__(self, word_margin)
self._x1 = +INF
return
def add(self, obj):
if isinstance(obj, LTChar) and self.word_margin:
margin = self.word_margin * max(obj.width, obj.height)
if self._x1 < obj.x0-margin:
LTContainer.add(self, LTAnno(' '))
self._x1 = obj.x1
LTTextLine.add(self, obj)
return
def find_neighbors(self, plane, ratio):
d = ratio*self.height
objs = plane.find((self.x0, self.y0-d, self.x1, self.y1+d))
return [obj for obj in objs
if (isinstance(obj, LTTextLineHorizontal) and
abs(obj.height-self.height) < d and
(abs(obj.x0-self.x0) < d or
abs(obj.x1-self.x1) < d))]
| LTTextLineHorizontal |
python | google__pytype | pytype/tools/merge_pyi/test_data/typevar.py | {
"start": 21,
"end": 94
} | class ____:
def __init__(self, initialdata = None):
pass
| UserDict |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 25041,
"end": 29959
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.sb = connections["default"].get_backend()
self.bmsi = BasicModelSearchIndex()
self.fmsi = FieldsModelSearchIndex()
self.emsi = ExcludesModelSearchIndex()
self.fwomsi = FieldsWithOverrideModelSearchIndex()
self.yabmsi = YetAnotherBasicModelSearchIndex()
self.m2mmsi = ModelWithManyToManyFieldModelSearchIndex()
def test_basic(self):
self.assertEqual(len(self.bmsi.fields), 4)
self.assertTrue("foo" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["foo"], indexes.CharField))
self.assertEqual(self.bmsi.fields["foo"].null, False)
self.assertEqual(self.bmsi.fields["foo"].index_fieldname, "foo")
self.assertTrue("author" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["author"], indexes.CharField))
self.assertEqual(self.bmsi.fields["author"].null, False)
self.assertTrue("pub_date" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue(
isinstance(self.bmsi.fields["pub_date"].default, datetime.datetime)
)
self.assertTrue("text" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["text"], indexes.CharField))
self.assertEqual(self.bmsi.fields["text"].document, True)
self.assertEqual(self.bmsi.fields["text"].use_template, True)
def test_fields(self):
self.assertEqual(len(self.fmsi.fields), 3)
self.assertTrue("author" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("text" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["text"], indexes.CharField))
def test_excludes(self):
self.assertEqual(len(self.emsi.fields), 2)
self.assertTrue("pub_date" in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("text" in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields["text"], indexes.CharField))
self.assertNotIn("related_models", self.m2mmsi.fields)
def test_fields_with_override(self):
self.assertEqual(len(self.fwomsi.fields), 3)
self.assertTrue("author" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["author"], indexes.CharField))
self.assertTrue("foo" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["foo"], indexes.IntegerField))
self.assertTrue("text" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["text"], indexes.CharField))
def test_overriding_field_name_with_get_index_fieldname(self):
self.assertTrue(self.fwomsi.fields["foo"].index_fieldname, "foo")
self.assertTrue(self.fwomsi.fields["author"].index_fieldname, "author_bar")
def test_float_integer_fields(self):
self.assertEqual(len(self.yabmsi.fields), 5)
self.assertEqual(
sorted(self.yabmsi.fields.keys()),
["author", "average_delay", "pub_date", "text", "view_count"],
)
self.assertTrue("author" in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields["author"], indexes.CharField))
self.assertEqual(self.yabmsi.fields["author"].null, False)
self.assertTrue("pub_date" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["pub_date"], indexes.DateTimeField)
)
self.assertTrue(
isinstance(self.yabmsi.fields["pub_date"].default, datetime.datetime)
)
self.assertTrue("text" in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields["text"], indexes.CharField))
self.assertEqual(self.yabmsi.fields["text"].document, True)
self.assertEqual(self.yabmsi.fields["text"].use_template, False)
self.assertTrue("view_count" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["view_count"], indexes.IntegerField)
)
self.assertEqual(self.yabmsi.fields["view_count"].null, False)
self.assertEqual(self.yabmsi.fields["view_count"].index_fieldname, "view_count")
self.assertTrue("average_delay" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["average_delay"], indexes.FloatField)
)
self.assertEqual(self.yabmsi.fields["average_delay"].null, False)
self.assertEqual(
self.yabmsi.fields["average_delay"].index_fieldname, "average_delay"
)
| ModelSearchIndexTestCase |
python | wandb__wandb | wandb/vendor/pygments/lexers/crystal.py | {
"start": 708,
"end": 16845
} | class ____(ExtendedRegexLexer):
"""
For `Crystal <http://crystal-lang.org>`_ source code.
.. versionadded:: 2.2
"""
name = 'Crystal'
aliases = ['cr', 'crystal']
filenames = ['*.cr']
mimetypes = ['text/x-crystal']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Crystal...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), String.Delimiter, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), String.Delimiter, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_crystalstrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[imsx]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
(r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
(words(CRYSTAL_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
# This allows arbitrary text after '\ for simplicity
(r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
(r':"', String.Symbol, 'simple-sym'),
# Crystal doesn't have "symbol:"s but this simplifies function args
(r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[' + lbrace + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
# http://crystal-lang.org/docs/syntax_and_semantics/literals/array.html
states['strings'].append((r'%[wi]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
(lbrace, String.Regex, '#push'),
(rbrace + '[imsx]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[imsx]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'(%[wi]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# and because of fixed width lookbehinds the whole thing a
# second time for line startings...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([\[{(<]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
# keywords
(words('''
abstract asm as begin break case do else elsif end ensure extend ifdef if
include instance_sizeof next of pointerof private protected rescue return
require sizeof super then typeof unless until when while with yield
'''.split(), suffix=r'\b'), Keyword),
(words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
# start of function, class and module names
(r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Text, Name.Namespace), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Text, Name.Namespace), 'classname'),
(r'(self|out|uninitialized)\b|(is_a|responds_to)\?', Keyword.Pseudo),
# macros
(words('''
debugger record pp assert_responds_to spawn parallel
getter setter property delegate def_hash def_equals def_equals_and_hash
forward_missing_to
'''.split(), suffix=r'\b'), Name.Builtin.Pseudo),
(r'getter[!?]|property[!?]|__(DIR|FILE|LINE)__\b', Name.Builtin.Pseudo),
# builtins
# http://crystal-lang.org/api/toplevel.html
(words('''
Object Value Struct Reference Proc Class Nil Symbol Enum Void
Bool Number Int Int8 Int16 Int32 Int64 UInt8 UInt16 UInt32 UInt64
Float Float32 Float64 Char String
Pointer Slice Range Exception Regex
Mutex StaticArray Array Hash Set Tuple Deque Box Process File
Dir Time Channel Concurrent Scheduler
abort at_exit caller delay exit fork future get_stack_top gets
lazy loop main p print printf puts
raise rand read_line sleep sprintf system with_color
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=\.index\s)|'
r'(?<=\.scan\s)|'
r'(?<=\.sub\s)|'
r'(?<=\.sub!\s)|'
r'(?<=\.gsub\s)|'
r'(?<=\.gsub!\s)|'
r'(?<=\.match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
# 3 separate expressions for floats because any of the 3 optional
# parts makes it a float
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Text, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Text, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+))(\s*)([/?])?',
bygroups(Number.Float, Text, Operator)),
(r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\befnrtv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][A-Z_]+\b', Name.Constant),
# macro expansion
(r'\{%', String.Interpol, 'in-macro-control'),
(r'\{\{', String.Interpol, 'in-macro-expr'),
# attributes
(r'(@\[)(\s*)([A-Z]\w*)',
bygroups(Operator, Text, Name.Decorator), 'in-attr'),
# this is needed because Crystal attributes can look
# like keywords (class) or like this: ` ?!?
(words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
# Names can end with [!?] unless it's "!="
(r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
(r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'(?:([a-zA-Z_]\w*)(\.))?'
r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'[A-Z_]\w*', Name.Class),
(r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
bygroups(Punctuation, Text, Name.Class, Text, Punctuation)),
default('#pop')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
],
'string-escaped': [
(r'\\([\\befnstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape)
],
'string-intp-escaped': [
include('string-intp'),
include('string-escaped'),
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[imsx]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
],
'in-macro-control': [
(r'\{%', String.Interpol, '#push'),
(r'%\}', String.Interpol, '#pop'),
(r'for\b|in\b', Keyword),
include('root'),
],
'in-macro-expr': [
(r'\{\{', String.Interpol, '#push'),
(r'\}\}', String.Interpol, '#pop'),
include('root'),
],
'in-attr': [
(r'\[', Operator, '#push'),
(r'\]', Operator, '#pop'),
include('root'),
],
}
tokens.update(gen_crystalstrings_rules())
| CrystalLexer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/types.py | {
"start": 9377,
"end": 10039
} | class ____(_IntegerType, sqltypes.INTEGER):
"""MySQL INTEGER type."""
__visit_name__ = "INTEGER"
def __init__(self, display_width: Optional[int] = None, **kw: Any):
"""Construct an INTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not effect the values
returned by the underlying database API, which continue to be
numeric.
"""
super().__init__(display_width=display_width, **kw)
| INTEGER |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 8317,
"end": 8759
} | class ____:
"""
Table 4.5.
Table 11 in the 2.0 reference.
"""
K = "/K" # integer
END_OF_LINE = "/EndOfLine" # boolean
ENCODED_BYTE_ALIGN = "/EncodedByteAlign" # boolean
COLUMNS = "/Columns" # integer
ROWS = "/Rows" # integer
END_OF_BLOCK = "/EndOfBlock" # boolean
BLACK_IS_1 = "/BlackIs1" # boolean
DAMAGED_ROWS_BEFORE_ERROR = "/DamagedRowsBeforeError" # integer
| CcittFaxDecodeParameters |
python | pallets__werkzeug | src/werkzeug/datastructures/cache_control.py | {
"start": 5234,
"end": 7250
} | class ____(ImmutableDictMixin[str, t.Optional[str]], _CacheControl): # type: ignore[misc]
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionchanged:: 3.1
Dict values are always ``str | None``. Setting properties will
convert the value to a string. Setting a non-bool property to
``False`` is equivalent to setting it to ``None``. Getting typed
properties will return ``None`` if conversion raises
``ValueError``, rather than the string.
.. versionchanged:: 3.1
``max_age`` is ``None`` if present without a value, rather
than ``-1``.
.. versionchanged:: 3.1
``no_cache`` is a boolean, it is ``True`` instead of ``"*"``
when present.
.. versionchanged:: 3.1
``max_stale`` is ``True`` if present without a value, rather
than ``"*"``.
.. versionchanged:: 3.1
``no_transform`` is a boolean. Previously it was mistakenly
always ``None``.
.. versionchanged:: 3.1
``min_fresh`` is ``None`` if present without a value, rather
than ``"*"``.
.. versionchanged:: 2.1
Setting int properties such as ``max_age`` will convert the
value to an int.
.. versionadded:: 0.5
Response-only properties are not present on this request class.
"""
no_cache: bool = cache_control_property("no-cache", None, bool)
max_stale: int | t.Literal[True] | None = cache_control_property(
"max-stale",
True,
int,
)
min_fresh: int | None = cache_control_property("min-fresh", None, int)
only_if_cached: bool = cache_control_property("only-if-cached", None, bool)
| RequestCacheControl |
python | apache__thrift | lib/py/src/transport/TTransport.py | {
"start": 6953,
"end": 7151
} | class ____(object):
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
| TFramedTransportFactory |
python | kamyu104__LeetCode-Solutions | Python/add-and-search-word-data-structure-design.py | {
"start": 60,
"end": 209
} | class ____(object):
# Initialize your data structure here.
def __init__(self):
self.is_string = False
self.leaves = {}
| TrieNode |
python | joke2k__faker | faker/providers/__init__.py | {
"start": 570,
"end": 23485
} | class ____:
__provider__ = "base"
__lang__: Optional[str] = None
__use_weighting__ = False
# Locales supported by Linux Mint from `/usr/share/i18n/SUPPORTED`
language_locale_codes = {
"aa": ("DJ", "ER", "ET"),
"af": ("ZA",),
"ak": ("GH",),
"am": ("ET",),
"an": ("ES",),
"apn": ("IN",),
"ar": (
"AE",
"BH",
"DJ",
"DZ",
"EG",
"EH",
"ER",
"IL",
"IN",
"IQ",
"JO",
"KM",
"KW",
"LB",
"LY",
"MA",
"MR",
"OM",
"PS",
"QA",
"SA",
"SD",
"SO",
"SS",
"SY",
"TD",
"TN",
"YE",
),
"as": ("IN",),
"ast": ("ES",),
"ayc": ("PE",),
"az": ("AZ", "IN"),
"be": ("BY",),
"bem": ("ZM",),
"ber": ("DZ", "MA"),
"bg": ("BG",),
"bhb": ("IN",),
"bho": ("IN",),
"bn": ("BD", "IN"),
"bo": ("CN", "IN"),
"br": ("FR",),
"brx": ("IN",),
"bs": ("BA",),
"byn": ("ER",),
"ca": ("AD", "ES", "FR", "IT"),
"ce": ("RU",),
"ckb": ("IQ",),
"cmn": ("TW",),
"crh": ("UA",),
"cs": ("CZ",),
"csb": ("PL",),
"cv": ("RU",),
"cy": ("GB",),
"da": ("DK",),
"de": ("AT", "BE", "CH", "DE", "LI", "LU"),
"doi": ("IN",),
"dv": ("MV",),
"dz": ("BT",),
"el": ("GR", "CY"),
"en": (
"AG",
"AU",
"BD",
"BW",
"CA",
"DK",
"GB",
"HK",
"IE",
"IN",
"NG",
"NZ",
"PH",
"PK",
"SG",
"US",
"ZA",
"ZM",
"ZW",
"KE",
),
"eo": ("US",),
"es": (
"AR",
"BO",
"CL",
"CO",
"CR",
"CU",
"DO",
"EC",
"ES",
"GT",
"HN",
"MX",
"NI",
"PA",
"PE",
"PR",
"PY",
"SV",
"US",
"UY",
"VE",
),
"et": ("EE",),
"eu": ("ES", "FR"),
"fa": ("IR",),
"ff": ("SN",),
"fi": ("FI",),
"fil": ("PH",),
"fo": ("FO",),
"fr": ("CA", "CH", "FR", "LU"),
"fur": ("IT",),
"fy": ("NL", "DE"),
"ga": ("IE",),
"gd": ("GB",),
"gez": ("ER", "ET"),
"gl": ("ES",),
"gu": ("IN",),
"gv": ("GB",),
"ha": ("NG",),
"hak": ("TW",),
"he": ("IL",),
"hi": ("IN",),
"hne": ("IN",),
"hr": ("HR",),
"hsb": ("DE",),
"ht": ("HT",),
"hu": ("HU",),
"hy": ("AM",),
"ia": ("FR",),
"id": ("ID",),
"ig": ("NG",),
"ik": ("CA",),
"is": ("IS",),
"it": ("CH", "IT"),
"iu": ("CA",),
"iw": ("IL",),
"ja": ("JP",),
"ka": ("GE",),
"kk": ("KZ",),
"kl": ("GL",),
"km": ("KH",),
"kn": ("IN",),
"ko": ("KR",),
"kok": ("IN",),
"ks": ("IN",),
"ku": ("TR",),
"kw": ("GB",),
"ky": ("KG",),
"lb": ("LU",),
"lg": ("UG",),
"li": ("BE", "NL"),
"lij": ("IT",),
"ln": ("CD",),
"lo": ("LA",),
"lt": ("LT",),
"lv": ("LV",),
"lzh": ("TW",),
"mag": ("IN",),
"mai": ("IN",),
"mg": ("MG",),
"mhr": ("RU",),
"mi": ("NZ",),
"mk": ("MK",),
"ml": ("IN",),
"mn": ("MN",),
"mni": ("IN",),
"mr": ("IN",),
"ms": ("MY",),
"mt": ("MT",),
"my": ("MM",),
"nan": ("TW",),
"nb": ("NO",),
"nds": ("DE", "NL"),
"ne": ("NP",),
"nhn": ("MX",),
"niu": ("NU", "NZ"),
"nl": ("AW", "BE", "NL"),
"nn": ("NO",),
"nr": ("ZA",),
"nso": ("ZA",),
"oc": ("FR",),
"om": ("ET", "KE"),
"or": ("IN",),
"os": ("RU",),
"pa": ("IN", "PK"),
"pap": ("AN", "AW", "CW"),
"pl": ("PL",),
"ps": ("AF",),
"pt": ("BR", "PT"),
"quz": ("PE",),
"raj": ("IN",),
"ro": ("RO",),
"ru": ("RU", "UA"),
"rw": ("RW",),
"sa": ("IN",),
"sat": ("IN",),
"sc": ("IT",),
"sd": ("IN", "PK"),
"se": ("NO",),
"shs": ("CA",),
"si": ("LK",),
"sid": ("ET",),
"sk": ("SK",),
"sl": ("SI",),
"so": ("DJ", "ET", "KE", "SO"),
"sq": ("AL", "ML"),
"sr": ("ME", "RS"),
"ss": ("ZA",),
"st": ("ZA",),
"sv": ("FI", "SE"),
"sw": ("KE", "TZ"),
"szl": ("PL",),
"ta": ("IN", "LK"),
"tcy": ("IN",),
"te": ("IN",),
"tg": ("TJ",),
"th": ("TH",),
"the": ("NP",),
"ti": ("ER", "ET"),
"tig": ("ER",),
"tk": ("TM",),
"tl": ("PH",),
"tn": ("ZA",),
"tr": ("CY", "TR"),
"ts": ("ZA",),
"tt": ("RU",),
"ug": ("CN",),
"uk": ("UA",),
"unm": ("US",),
"ur": ("IN", "PK"),
"uz": ("UZ",),
"ve": ("ZA",),
"vi": ("VN",),
"wa": ("BE",),
"wae": ("CH",),
"wal": ("ET",),
"wo": ("SN",),
"xh": ("ZA",),
"yi": ("US",),
"yo": ("NG",),
"yue": ("HK",),
"zh": ("CN", "HK", "SG", "TW"),
"zu": ("ZA",),
}
def __init__(self, generator: Any) -> None:
"""
Base class for fake data providers
:param generator: `Generator` instance
"""
self.generator = generator
def locale(self) -> str:
"""Generate a random underscored i18n locale code (e.g. en_US)."""
language_code = self.language_code()
return (
language_code
+ "_"
+ self.random_element(
BaseProvider.language_locale_codes[language_code],
)
)
def language_code(self) -> str:
"""Generate a random i18n language code (e.g. en)."""
return self.random_element(BaseProvider.language_locale_codes.keys())
def random_int(self, min: int = 0, max: int = 9999, step: int = 1) -> int:
"""Generate a random integer between two integers ``min`` and ``max`` inclusive
while observing the provided ``step`` value.
This method is functionally equivalent to randomly sampling an integer
from the sequence ``range(min, max + 1, step)``.
:sample: min=0, max=15
:sample: min=0, max=15, step=3
"""
return self.generator.random.randrange(min, max + 1, step)
def random_digit(self) -> int:
"""Generate a random digit (0 to 9)."""
return self.generator.random.randint(0, 9)
def random_digit_not_null(self) -> int:
"""Generate a random non-zero digit (1 to 9)."""
return self.generator.random.randint(1, 9)
def random_digit_above_two(self) -> int:
"""Generate a random digit above value two (2 to 9)."""
return self.generator.random.randint(2, 9)
def random_digit_or_empty(self) -> Union[int, str]:
"""Generate a random digit (0 to 9) or an empty string.
This method will return an empty string 50% of the time,
and each digit has a 1/20 chance of being generated.
"""
if self.generator.random.randint(0, 1):
return self.generator.random.randint(0, 9)
else:
return ""
def random_digit_not_null_or_empty(self) -> Union[int, str]:
"""Generate a random non-zero digit (1 to 9) or an empty string.
This method will return an empty string 50% of the time,
and each digit has a 1/18 chance of being generated.
"""
if self.generator.random.randint(0, 1):
return self.generator.random.randint(1, 9)
else:
return ""
def random_number(self, digits: Optional[int] = None, fix_len: bool = False) -> int:
"""Generate a random integer according to the following rules:
- If ``digits`` is ``None`` (default), its value will be set to a random
integer from 1 to 9.
- If ``fix_len`` is ``False`` (default), all integers that do not exceed
the number of ``digits`` can be generated.
- If ``fix_len`` is ``True``, only integers with the exact number of
``digits`` can be generated.
:sample: fix_len=False
:sample: fix_len=True
:sample: digits=3
:sample: digits=3, fix_len=False
:sample: digits=3, fix_len=True
"""
if digits is None:
digits = self.random_digit_not_null()
if digits < 0:
raise ValueError("The digit parameter must be greater than or equal to 0.")
if fix_len:
if digits > 0:
return self.generator.random.randint(pow(10, digits - 1), pow(10, digits) - 1)
else:
raise ValueError("A number of fixed length cannot have less than 1 digit in it.")
else:
return self.generator.random.randint(0, pow(10, digits) - 1)
def random_letter(self) -> str:
"""Generate a random ASCII letter (a-z and A-Z)."""
return self.generator.random.choice(getattr(string, "letters", string.ascii_letters))
def random_letters(self, length: int = 16) -> Sequence[str]:
"""Generate a list of random ASCII letters (a-z and A-Z) of the specified ``length``.
:sample: length=10
"""
return self.random_choices(
getattr(string, "letters", string.ascii_letters),
length=length,
)
def random_lowercase_letter(self) -> str:
"""Generate a random lowercase ASCII letter (a-z)."""
return self.generator.random.choice(string.ascii_lowercase)
def random_uppercase_letter(self) -> str:
"""Generate a random uppercase ASCII letter (A-Z)."""
return self.generator.random.choice(string.ascii_uppercase)
def random_elements(
self,
elements: ElementsType[T] = ("a", "b", "c"), # type: ignore[assignment]
length: Optional[int] = None,
unique: bool = False,
use_weighting: Optional[bool] = None,
) -> Sequence[T]:
"""Generate a list of randomly sampled objects from ``elements``.
Set ``unique`` to ``False`` for random sampling with replacement, and set ``unique`` to
``True`` for random sampling without replacement.
If ``length`` is set to ``None`` or is omitted, ``length`` will be set to a random
integer from 1 to the size of ``elements``.
The value of ``length`` cannot be greater than the number of objects
in ``elements`` if ``unique`` is set to ``True``.
The value of ``elements`` can be any sequence type (``list``, ``tuple``, ``set``,
``string``, etc) or an ``OrderedDict`` type. If it is the latter, the keys will be
used as the objects for sampling, and the values will be used as weighted probabilities
if ``unique`` is set to ``False``. For example:
.. code-block:: python
# Random sampling with replacement
fake.random_elements(
elements=OrderedDict([
("variable_1", 0.5), # Generates "variable_1" 50% of the time
("variable_2", 0.2), # Generates "variable_2" 20% of the time
("variable_3", 0.2), # Generates "variable_3" 20% of the time
("variable_4": 0.1), # Generates "variable_4" 10% of the time
]), unique=False
)
# Random sampling without replacement (defaults to uniform distribution)
fake.random_elements(
elements=OrderedDict([
("variable_1", 0.5),
("variable_2", 0.2),
("variable_3", 0.2),
("variable_4": 0.1),
]), unique=True
)
:sample: elements=('a', 'b', 'c', 'd'), unique=False
:sample: elements=('a', 'b', 'c', 'd'), unique=True
:sample: elements=('a', 'b', 'c', 'd'), length=10, unique=False
:sample: elements=('a', 'b', 'c', 'd'), length=4, unique=True
:sample: elements=OrderedDict([
("a", 0.45),
("b", 0.35),
("c", 0.15),
("d", 0.05),
]), length=20, unique=False
:sample: elements=OrderedDict([
("a", 0.45),
("b", 0.35),
("c", 0.15),
("d", 0.05),
]), unique=True
"""
use_weighting = use_weighting if use_weighting is not None else self.__use_weighting__
if isinstance(elements, dict) and not isinstance(elements, OrderedDict):
raise ValueError("Use OrderedDict only to avoid dependency on PYTHONHASHSEED (See #363).")
fn = choices_distribution_unique if unique else choices_distribution
if length is None:
length = self.generator.random.randint(1, len(elements))
if unique and length > len(elements):
raise ValueError("Sample length cannot be longer than the number of unique elements to pick from.")
if isinstance(elements, dict):
if not hasattr(elements, "_key_cache"):
elements._key_cache = tuple(elements.keys()) # type: ignore
choices = elements._key_cache # type: ignore[attr-defined, union-attr]
probabilities = tuple(elements.values()) if use_weighting else None
else:
if unique:
# shortcut
return self.generator.random.sample(elements, length)
choices = elements
probabilities = None
return fn(
tuple(choices),
probabilities,
self.generator.random,
length=length,
)
def random_choices(
self,
elements: ElementsType[T] = ("a", "b", "c"), # type: ignore[assignment]
length: Optional[int] = None,
) -> Sequence[T]:
"""Generate a list of objects randomly sampled from ``elements`` with replacement.
For information on the ``elements`` and ``length`` arguments, please refer to
:meth:`random_elements() <faker.providers.BaseProvider.random_elements>` which
is used under the hood with the ``unique`` argument explicitly set to ``False``.
:sample: elements=('a', 'b', 'c', 'd')
:sample: elements=('a', 'b', 'c', 'd'), length=10
:sample: elements=OrderedDict([
("a", 0.45),
("b", 0.35),
("c", 0.15),
("d", 0.05),
])
:sample: elements=OrderedDict([
("a", 0.45),
("b", 0.35),
("c", 0.15),
("d", 0.05),
]), length=20
"""
return self.random_elements(elements, length, unique=False)
def random_element(self, elements: ElementsType[T] = ("a", "b", "c")) -> T: # type: ignore[assignment]
"""Generate a randomly sampled object from ``elements``.
For information on the ``elements`` argument, please refer to
:meth:`random_elements() <faker.providers.BaseProvider.random_elements>` which
is used under the hood with the ``unique`` argument set to ``False`` and the
``length`` argument set to ``1``.
:sample: elements=('a', 'b', 'c', 'd')
:sample size=10: elements=OrderedDict([
("a", 0.45),
("b", 0.35),
("c", 0.15),
("d", 0.05),
])
"""
return self.random_elements(elements, length=1)[0]
def random_sample(
self, elements: ElementsType[T] = ("a", "b", "c"), length: Optional[int] = None # type: ignore[assignment]
) -> Sequence[T]:
"""Generate a list of objects randomly sampled from ``elements`` without replacement.
For information on the ``elements`` and ``length`` arguments, please refer to
:meth:`random_elements() <faker.providers.BaseProvider.random_elements>` which
is used under the hood with the ``unique`` argument explicitly set to ``True``.
:sample: elements=('a', 'b', 'c', 'd', 'e', 'f')
:sample: elements=('a', 'b', 'c', 'd', 'e', 'f'), length=3
"""
return self.random_elements(elements, length, unique=True)
def randomize_nb_elements(
self,
number: int = 10,
le: bool = False,
ge: bool = False,
min: Optional[int] = None,
max: Optional[int] = None,
) -> int:
"""Generate a random integer near ``number`` according to the following rules:
- If ``le`` is ``False`` (default), allow generation up to 140% of ``number``.
If ``True``, upper bound generation is capped at 100%.
- If ``ge`` is ``False`` (default), allow generation down to 60% of ``number``.
If ``True``, lower bound generation is capped at 100%.
- If a numerical value for ``min`` is provided, generated values less than ``min``
will be clamped at ``min``.
- If a numerical value for ``max`` is provided, generated values greater than
``max`` will be clamped at ``max``.
- If both ``le`` and ``ge`` are ``True``, the value of ``number`` will automatically
be returned, regardless of the values supplied for ``min`` and ``max``.
:sample: number=100
:sample: number=100, ge=True
:sample: number=100, ge=True, min=120
:sample: number=100, le=True
:sample: number=100, le=True, max=80
:sample: number=79, le=True, ge=True, min=80
"""
if le and ge:
return number
_min = 100 if ge else 60
_max = 100 if le else 140
nb = int(number * self.generator.random.randint(_min, _max) / 100)
if min is not None and nb < min:
nb = min
if max is not None and nb > max:
nb = max
return nb
def numerify(self, text: str = "###") -> str:
"""Generate a string with each placeholder in ``text`` replaced according
to the following rules:
- Number signs ('#') are replaced with a random digit (0 to 9).
- Percent signs ('%') are replaced with a random non-zero digit (1 to 9).
- Dollar signs ('$') are replaced with a random digit above two (2 to 9).
- Exclamation marks ('!') are replaced with a random digit or an empty string.
- At symbols ('@') are replaced with a random non-zero digit or an empty string.
Under the hood, this method uses :meth:`random_digit() <faker.providers.BaseProvider.random_digit>`,
:meth:`random_digit_not_null() <faker.providers.BaseProvider.random_digit_not_null>`,
:meth:`random_digit_or_empty() <faker.providers.BaseProvider.random_digit_or_empty>`,
and :meth:`random_digit_not_null_or_empty() <faker.providers.BaseProvider.random_digit_not_null_or_empty>`
to generate the random values.
:sample: text='Intel Core i%-%%##K vs AMD Ryzen % %%##X'
:sample: text='!!! !!@ !@! !@@ @!! @!@ @@! @@@'
"""
text = _re_hash.sub(lambda x: str(self.random_digit()), text)
text = _re_perc.sub(lambda x: str(self.random_digit_not_null()), text)
text = _re_dol.sub(lambda x: str(self.random_digit_above_two()), text)
text = _re_excl.sub(lambda x: str(self.random_digit_or_empty()), text)
text = _re_at.sub(lambda x: str(self.random_digit_not_null_or_empty()), text)
return text
def lexify(self, text: str = "????", letters: str = string.ascii_letters) -> str:
"""Generate a string with each question mark ('?') in ``text``
replaced with a random character from ``letters``.
By default, ``letters`` contains all ASCII letters, uppercase and lowercase.
:sample: text='Random Identifier: ??????????'
:sample: text='Random Identifier: ??????????', letters='ABCDE'
"""
return _re_qm.sub(lambda x: self.random_element(letters), text)
def bothify(self, text: str = "## ??", letters: str = string.ascii_letters) -> str:
"""Generate a string with each placeholder in ``text`` replaced according to the following rules:
- Number signs ('#') are replaced with a random digit (0 to 9).
- Percent signs ('%') are replaced with a random non-zero digit (1 to 9).
- Dollar signs ('$') are replaced with a random digit above two (2 to 9).
- Exclamation marks ('!') are replaced with a random digit or an empty string.
- At symbols ('@') are replaced with a random non-zero digit or an empty string.
- Question marks ('?') are replaced with a random character from ``letters``.
By default, ``letters`` contains all ASCII letters, uppercase and lowercase.
Under the hood, this method uses :meth:`numerify() <faker.providers.BaseProvider.numerify>` and
and :meth:`lexify() <faker.providers.BaseProvider.lexify>` to generate random values for number
signs and question marks respectively.
:sample: letters='ABCDE'
:sample: text='Product Number: ????-########'
:sample: text='Product Number: ????-########', letters='ABCDE'
:sample: text='Order: ##??-$'
"""
return self.lexify(self.numerify(text), letters=letters)
def hexify(self, text: str = "^^^^", upper: bool = False) -> str:
"""Generate a string with each circumflex ('^') in ``text``
replaced with a random hexadecimal character.
By default, ``upper`` is set to False. If set to ``True``, output
will be formatted using uppercase hexadecimal characters.
:sample: text='MAC Address: ^^:^^:^^:^^:^^:^^'
:sample: text='MAC Address: ^^:^^:^^:^^:^^:^^', upper=True
"""
letters = string.hexdigits[:-6]
if upper:
letters = letters.upper()
return _re_cir.sub(lambda x: self.random_element(letters), text)
| BaseProvider |
python | dagster-io__dagster | python_modules/libraries/dagster-sling/dagster_sling/resources.py | {
"start": 1548,
"end": 4034
} | class ____(PermissiveConfig):
"""A representation of a connection to a database or file to be used by Sling. This resource can be used as a source or a target for a Sling syncs.
Reference the Sling docs for more information on possible connection types and parameters: https://docs.slingdata.io/connections
The name of the connection is passed to Sling and must match the name of the connection provided in the replication configuration: https://docs.slingdata.io/sling-cli/run/configuration/replication
You may provide either a connection string or keyword arguments for the connection.
Examples:
Creating a Sling Connection for a file, such as CSV or JSON:
.. code-block:: python
source = SlingConnectionResource(name="MY_FILE", type="file")
Create a Sling Connection for a Postgres database, using a connection string:
.. code-block:: python
postgres_conn = SlingConnectionResource(name="MY_POSTGRES", type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING"))
mysql_conn = SlingConnectionResource(name="MY_MYSQL", type="mysql", connection_string="mysql://user:password@host:port/schema")
Create a Sling Connection for a Postgres or Snowflake database, using keyword arguments:
.. code-block::python
postgres_conn = SlingConnectionResource(
name="MY_OTHER_POSRGRES",
type="postgres",
host="host",
user="hunter42",
password=EnvVar("POSTGRES_PASSWORD")
)
snowflake_conn = SlingConnectionResource(
name="MY_SNOWFLAKE",
type="snowflake",
host=EnvVar("SNOWFLAKE_HOST"),
user=EnvVar("SNOWFLAKE_USER"),
database=EnvVar("SNOWFLAKE_DATABASE"),
password=EnvVar("SNOWFLAKE_PASSWORD"),
role=EnvVar("SNOWFLAKE_ROLE")
)
"""
name: str = Field(
description="The name of the connection, must match the name in your Sling replication configuration."
)
type: str = Field(
description="Type of the source connection, must match the Sling connection types. Use 'file' for local storage."
)
connection_string: Optional[str] = Field(
description="The optional connection string for the source database, if not using keyword arguments.",
default=None,
)
| SlingConnectionResource |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/helpers/object_helpers.py | {
"start": 248,
"end": 1162
} | class ____(EnumMeta):
"""A metaclass for creating enums with case-insensitive keys."""
def __getitem__(cls, item):
try:
return super().__getitem__(item)
except Exception:
for key in cls._member_map_:
if key.casefold() == item.casefold():
return super().__getitem__(key)
def default_none_to_dict(value, key, obj):
"""Set the value of a key in a dictionary to an empty dictionary if the value is None.
Useful with pydash's set_with function.
e.g. set_with(obj, key, value, default_none_to_dict)
For more information, see https://github.com/dgilland/pydash/issues/122
Args:
value: The value to check.
key: The key to set in the dictionary.
obj: The dictionary to set the key in.
"""
if obj is None:
return
if value is None:
obj[key] = {}
| CaseInsensitiveKeys |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.