language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | joke2k__faker | faker/providers/company/zh_TW/__init__.py | {
"start": 45,
"end": 2041
} | class ____(CompanyProvider):
formats = ("{{company_prefix}}{{company_suffix}}",)
company_prefixes = (
"品王餐飲",
"一統企業",
"品誠",
"台灣電信",
"Goagle",
"一統星巴克",
"台日積體電路",
"榮長航空",
"台灣印無品良",
"華中航空",
"台灣人銀行",
"國中鋼鐵",
"海鴻精密",
"台灣鐵高",
"家宜家居(KIEA)",
"天上雜誌",
"台灣力電",
"碩華電腦",
"雄豹旅遊",
"光新三越百貨",
"台灣軟微",
"鐵台",
"一統超商",
"碁宏",
"創群光電(奇原美電子)",
"台灣酒菸",
"美奧廣告",
"AYHOO!摩奇",
"台灣台油",
"達宏國際電子",
"華晶國際酒店",
"秀威影城",
"王鼎餐飲集團",
"台灣五星電子",
"遊戲葡萄數位科技",
"橋子王生技",
"大八電視",
"台灣業糖",
"都亞緻麗",
"台灣來自水",
"麥當當",
"風微廣場",
"見遠雜誌",
"石金堂",
"邦城文化事業",
"華中郵政",
"達友光電",
"中台信託商業銀行",
"台北登來喜大飯店",
"全味食品工業",
"遠西百貨",
"旗花(台灣銀)行",
"冠智科技",
"丹味企業",
"發聯科技",
"台灣雅萊(Y'ORÉAL)",
"古太可口可樂",
"榮長海運",
"達廣電腦",
"華福大飯店",
"立三電視",
"星燦國際旅行社",
"衣優庫(Nuiqlo)",
"德汎",
"台北眾大捷運",
"共公電視",
"明陽海運",
"雄遠建設事業",
"台灣迪奧汽車",
"台灣地土銀行",
"天中電視",
"月日光半導體",
"塑台石化",
"樂可旅遊集團",
"信永藥品",
"輝燁企業",
"興復航空運輸",
"豐兆國際商業銀行",
"平太洋崇光百貨",
"神漢名店百貨",
"台灣士賓",
"賓國大飯店",
"業商週刊",
"台灣BIM",
"湖劍山世界",
"合作庫金商業銀行",
"台北邦富商業銀行",
"愛味之",
"邦富人壽保險",
"律理法律",
"心安食品服務(斯摩漢堡)",
"松黑",
"台灣生資堂",
"鮮爭",
"達台電子",
"聯燁鋼鐵",
"華聯電子",
"瑞輝大藥廠",
"隆豐大飯店(北台君悅)",
"資華粧業(生資堂)",
)
company_suffixes = ("", "有限公司", "股份有限公司", "資訊有限公司")
def company_prefix(self) -> str:
return self.random_element(self.company_prefixes)
| Provider |
python | numba__llvmlite | llvmlite/ir/values.py | {
"start": 2251,
"end": 10079
} | class ____(object):
"""
A mixin defining constant operations, for use in constant-like classes.
"""
#
# Arithmetic APIs
#
@_binop('shl')
def shl(self, other):
"""
Left integer shift:
lhs << rhs
"""
@_binop('lshr')
def lshr(self, other):
"""
Logical (unsigned) right integer shift:
lhs >> rhs
"""
@_binop('ashr')
def ashr(self, other):
"""
Arithmetic (signed) right integer shift:
lhs >> rhs
"""
@_binop('add')
def add(self, other):
"""
Integer addition:
lhs + rhs
"""
@_binop('fadd')
def fadd(self, other):
"""
Floating-point addition:
lhs + rhs
"""
@_binop('sub')
def sub(self, other):
"""
Integer subtraction:
lhs - rhs
"""
@_binop('fsub')
def fsub(self, other):
"""
Floating-point subtraction:
lhs - rhs
"""
@_binop('mul')
def mul(self, other):
"""
Integer multiplication:
lhs * rhs
"""
@_binop('fmul')
def fmul(self, other):
"""
Floating-point multiplication:
lhs * rhs
"""
@_binop('udiv')
def udiv(self, other):
"""
Unsigned integer division:
lhs / rhs
"""
@_binop('sdiv')
def sdiv(self, other):
"""
Signed integer division:
lhs / rhs
"""
@_binop('fdiv')
def fdiv(self, other):
"""
Floating-point division:
lhs / rhs
"""
@_binop('urem')
def urem(self, other):
"""
Unsigned integer remainder:
lhs % rhs
"""
@_binop('srem')
def srem(self, other):
"""
Signed integer remainder:
lhs % rhs
"""
@_binop('frem')
def frem(self, other):
"""
Floating-point remainder:
lhs % rhs
"""
@_binop('or')
def or_(self, other):
"""
Bitwise integer OR:
lhs | rhs
"""
@_binop('and')
def and_(self, other):
"""
Bitwise integer AND:
lhs & rhs
"""
@_binop('xor')
def xor(self, other):
"""
Bitwise integer XOR:
lhs ^ rhs
"""
def _cmp(self, prefix, sign, cmpop, other):
ins = prefix + 'cmp'
try:
op = _CMP_MAP[cmpop]
except KeyError:
raise ValueError("invalid comparison %r for %s" % (cmpop, ins))
if not (prefix == 'i' and cmpop in ('==', '!=')):
op = sign + op
if self.type != other.type:
raise ValueError("Operands must be the same type, got (%s, %s)"
% (self.type, other.type))
fmt = "{0} {1} ({2} {3}, {4} {5})".format(
ins, op,
self.type, self.get_reference(),
other.type, other.get_reference())
return FormattedConstant(types.IntType(1), fmt)
def icmp_signed(self, cmpop, other):
"""
Signed integer comparison:
lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>='
"""
return self._cmp('i', 's', cmpop, other)
def icmp_unsigned(self, cmpop, other):
"""
Unsigned integer (or pointer) comparison:
lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>='
"""
return self._cmp('i', 'u', cmpop, other)
def fcmp_ordered(self, cmpop, other):
"""
Floating-point ordered comparison:
lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno'
"""
return self._cmp('f', 'o', cmpop, other)
def fcmp_unordered(self, cmpop, other):
"""
Floating-point unordered comparison:
lhs <cmpop> rhs
where cmpop can be '==', '!=', '<', '<=', '>', '>=', 'ord', 'uno'
"""
return self._cmp('f', 'u', cmpop, other)
#
# Unary APIs
#
def not_(self):
"""
Bitwise integer complement:
~value
"""
if isinstance(self.type, types.VectorType):
rhs = values.Constant(self.type, (-1,) * self.type.count)
else:
rhs = values.Constant(self.type, -1)
return self.xor(rhs)
def neg(self):
"""
Integer negative:
-value
"""
zero = values.Constant(self.type, 0)
return zero.sub(self)
def fneg(self):
"""
Floating-point negative:
-value
"""
fmt = "fneg ({0} {1})".format(self.type, self.get_reference())
return FormattedConstant(self.type, fmt)
#
# Cast APIs
#
@_castop('trunc')
def trunc(self, typ):
"""
Truncating integer downcast to a smaller type.
"""
@_castop('zext')
def zext(self, typ):
"""
Zero-extending integer upcast to a larger type
"""
@_castop('sext')
def sext(self, typ):
"""
Sign-extending integer upcast to a larger type.
"""
@_castop('fptrunc')
def fptrunc(self, typ):
"""
Floating-point downcast to a less precise type.
"""
@_castop('fpext')
def fpext(self, typ):
"""
Floating-point upcast to a more precise type.
"""
@_castop('bitcast')
def bitcast(self, typ):
"""
Pointer cast to a different pointer type.
"""
@_castop('fptoui')
def fptoui(self, typ):
"""
Convert floating-point to unsigned integer.
"""
@_castop('uitofp')
def uitofp(self, typ):
"""
Convert unsigned integer to floating-point.
"""
@_castop('fptosi')
def fptosi(self, typ):
"""
Convert floating-point to signed integer.
"""
@_castop('sitofp')
def sitofp(self, typ):
"""
Convert signed integer to floating-point.
"""
@_castop('ptrtoint')
def ptrtoint(self, typ):
"""
Cast pointer to integer.
"""
if not isinstance(self.type, types.PointerType):
msg = "can only call ptrtoint() on pointer type, not '%s'"
raise TypeError(msg % (self.type,))
if not isinstance(typ, types.IntType):
raise TypeError("can only ptrtoint() to integer type, not '%s'"
% (typ,))
@_castop('inttoptr')
def inttoptr(self, typ):
"""
Cast integer to pointer.
"""
if not isinstance(self.type, types.IntType):
msg = "can only call inttoptr() on integer constants, not '%s'"
raise TypeError(msg % (self.type,))
if not isinstance(typ, types.PointerType):
raise TypeError("can only inttoptr() to pointer type, not '%s'"
% (typ,))
def gep(self, indices):
"""
Call getelementptr on this pointer constant.
"""
if not isinstance(self.type, types.PointerType):
raise TypeError("can only call gep() on pointer constants, not '%s'"
% (self.type,))
outtype = self.type
for i in indices:
outtype = outtype.gep(i)
strindices = ["{0} {1}".format(idx.type, idx.get_reference())
for idx in indices]
op = "getelementptr ({0}, {1} {2}, {3})".format(
self.type.pointee, self.type,
self.get_reference(), ', '.join(strindices))
return FormattedConstant(outtype.as_pointer(self.addrspace), op)
| _ConstOpMixin |
python | apache__airflow | helm-tests/tests/helm_tests/apiserver/test_ingress_apiserver.py | {
"start": 914,
"end": 9797
} | class ____:
"""Tests ingress API Server."""
def test_should_pass_validation_with_just_ingress_enabled_v1(self):
render_chart(
values={"ingress": {"apiServer": {"enabled": True}}, "airflowVersion": "3.0.0"},
show_only=["templates/api-server/api-server-ingress.yaml"],
) # checks that no validation exception is raised
def test_should_pass_validation_with_just_ingress_enabled_v1beta1(self):
render_chart(
values={"ingress": {"apiServer": {"enabled": True}}, "airflowVersion": "3.0.0"},
show_only=["templates/api-server/api-server-ingress.yaml"],
kubernetes_version="1.16.0",
) # checks that no validation exception is raised
def test_should_allow_more_than_one_annotation(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {"apiServer": {"enabled": True, "annotations": {"aa": "bb", "cc": "dd"}}},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert jmespath.search("metadata.annotations", docs[0]) == {"aa": "bb", "cc": "dd"}
def test_should_set_ingress_class_name(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {"apiServer": {"enabled": True, "ingressClassName": "foo"}},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert jmespath.search("spec.ingressClassName", docs[0]) == "foo"
def test_should_ingress_hosts_objs_have_priority_over_host(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {
"apiServer": {
"enabled": True,
"tls": {"enabled": True, "secretName": "oldsecret"},
"hosts": [
{"name": "*.a-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "b-host", "tls": {"enabled": True, "secretName": "newsecret2"}},
{"name": "c-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "d-host", "tls": {"enabled": False, "secretName": ""}},
{"name": "e-host"},
],
"host": "old-host",
},
},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert jmespath.search("spec.rules[*].host", docs[0]) == [
"*.a-host",
"b-host",
"c-host",
"d-host",
"e-host",
]
assert jmespath.search("spec.tls[*]", docs[0]) == [
{"hosts": ["*.a-host"], "secretName": "newsecret1"},
{"hosts": ["b-host"], "secretName": "newsecret2"},
{"hosts": ["c-host"], "secretName": "newsecret1"},
]
def test_should_ingress_hosts_strs_have_priority_over_host(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {
"apiServer": {
"enabled": True,
"tls": {"enabled": True, "secretName": "secret"},
"hosts": ["*.a-host", "b-host", "c-host", "d-host"],
"host": "old-host",
},
},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert jmespath.search("spec.rules[*].host", docs[0]) == ["*.a-host", "b-host", "c-host", "d-host"]
assert jmespath.search("spec.tls[*]", docs[0]) == [
{"hosts": ["*.a-host", "b-host", "c-host", "d-host"], "secretName": "secret"}
]
def test_should_ingress_deprecated_host_and_top_level_tls_still_work(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {
"apiServer": {
"enabled": True,
"tls": {"enabled": True, "secretName": "supersecret"},
"host": "old-host",
},
},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert (
["old-host"]
== jmespath.search("spec.rules[*].host", docs[0])
== jmespath.search("spec.tls[0].hosts", docs[0])
)
def test_should_ingress_host_entry_not_exist(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {
"apiServer": {
"enabled": True,
}
},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert not jmespath.search("spec.rules[*].host", docs[0])
@pytest.mark.parametrize(
("global_value", "api_server_value", "expected"),
[
(None, None, False),
(None, False, False),
(None, True, True),
(False, None, False),
(True, None, True),
(False, True, True), # We will deploy it if _either_ are true
(True, False, True),
],
)
def test_ingress_created(self, global_value, api_server_value, expected):
values = {"airflowVersion": "3.0.0", "ingress": {}}
if global_value is not None:
values["ingress"]["enabled"] = global_value
if api_server_value is not None:
values["ingress"]["apiServer"] = {"enabled": api_server_value}
if values["ingress"] == {}:
del values["ingress"]
docs = render_chart(values=values, show_only=["templates/api-server/api-server-ingress.yaml"])
assert expected == (len(docs) == 1)
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"ingress": {"enabled": True},
"apiServer": {
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_can_ingress_hosts_be_templated(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"testValues": {
"scalar": "aa",
"list": ["bb", "cc"],
"dict": {
"key": "dd",
},
},
"ingress": {
"apiServer": {
"enabled": True,
"hosts": [
{"name": "*.{{ .Release.Namespace }}.example.com"},
{"name": "{{ .Values.testValues.scalar }}.example.com"},
{"name": "{{ index .Values.testValues.list 1 }}.example.com"},
{"name": "{{ .Values.testValues.dict.key }}.example.com"},
],
},
},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
namespace="airflow",
)
assert jmespath.search("spec.rules[*].host", docs[0]) == [
"*.airflow.example.com",
"aa.example.com",
"cc.example.com",
"dd.example.com",
]
def test_backend_service_name(self):
docs = render_chart(
values={"airflowVersion": "3.0.0", "ingress": {"apiServer": {"enabled": True}}},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert (
jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
== "release-name-api-server"
)
def test_backend_service_name_with_fullname_override(self):
docs = render_chart(
values={
"airflowVersion": "3.0.0",
"fullnameOverride": "test-basic",
"useStandardNaming": True,
"ingress": {"apiServer": {"enabled": True}},
},
show_only=["templates/api-server/api-server-ingress.yaml"],
)
assert (
jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
== "test-basic-api-server"
)
| TestIngressAPIServer |
python | ipython__ipython | IPython/utils/strdispatch.py | {
"start": 185,
"end": 1832
} | class ____:
"""Dispatch (lookup) a set of strings / regexps for match.
Example:
>>> dis = StrDispatch()
>>> dis.add_s('hei',34, priority = 4)
>>> dis.add_s('hei',123, priority = 2)
>>> dis.add_re('h.i', 686)
>>> print(list(dis.flat_matches('hei')))
[123, 34, 686]
"""
def __init__(self):
self.strs = {}
self.regexs = {}
def add_s(self, s, obj, priority= 0 ):
""" Adds a target 'string' for dispatching """
chain = self.strs.get(s, CommandChainDispatcher())
chain.add(obj,priority)
self.strs[s] = chain
def add_re(self, regex, obj, priority= 0 ):
""" Adds a target regexp for dispatching """
chain = self.regexs.get(regex, CommandChainDispatcher())
chain.add(obj,priority)
self.regexs[regex] = chain
def dispatch(self, key):
""" Get a seq of Commandchain objects that match key """
if key in self.strs:
yield self.strs[key]
for r, obj in self.regexs.items():
if re.match(r, key):
yield obj
else:
# print("nomatch",key) # dbg
pass
def __repr__(self):
return "<Strdispatch %s, %s>" % (self.strs, self.regexs)
def s_matches(self, key):
if key not in self.strs:
return
for el in self.strs[key]:
yield el[1]
def flat_matches(self, key):
""" Yield all 'value' targets, without priority """
for val in self.dispatch(key):
for el in val:
yield el[1] # only value, no priority
return
| StrDispatch |
python | huggingface__transformers | src/transformers/models/sam/modeling_sam.py | {
"start": 14985,
"end": 17074
} | class ____(nn.Module):
def __init__(self, config: SamMaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(SamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
self.final_attn_token_to_image = SamAttention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(
self,
point_embeddings: Tensor,
image_embeddings: Tensor,
image_positional_embeddings: Tensor,
attention_similarity: Tensor,
target_embedding=None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError("You have to specify an image_embedding")
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
# Prepare queries
queries = point_embeddings
keys = image_embeddings
# Apply transformer blocks and final layernorm
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(
queries=queries,
keys=keys,
query_point_embedding=point_embeddings,
key_point_embedding=image_positional_embeddings,
attention_similarity=attention_similarity,
**kwargs,
)
# Apply the final attention layer from the points to the image
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return queries, keys
| SamTwoWayTransformer |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 37343,
"end": 37694
} | class ____(Operator):
""" Decrement a lower index. """
def __init__(self, bi):
bi = sympify(bi)
if bi == 1:
raise ValueError('Cannot decrement unit lower index.')
self._poly = Poly(_x/(bi - 1) + 1, _x)
def __str__(self):
return '<Decrement lower %s.>' % (1/self._poly.all_coeffs()[0] + 1)
| ShiftB |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore.py | {
"start": 17334,
"end": 18034
} | class ____(unittest.TestCase):
class MockSearcher(Searcher):
def __init__(self, data):
self.data = data
def save(self, path):
with open(path, "w") as f:
f.write(self.data)
def restore(self, path):
with open(path, "r") as f:
self.data = f.read()
def testSaveRestoreDir(self):
tmpdir = tempfile.mkdtemp()
original_data = "hello-its-me"
searcher = self.MockSearcher(original_data)
searcher.save_to_dir(tmpdir)
searcher_2 = self.MockSearcher("no-its-not-me")
searcher_2.restore_from_dir(tmpdir)
assert searcher_2.data == original_data
| SearcherTest |
python | joke2k__faker | faker/providers/barcode/en_US/__init__.py | {
"start": 156,
"end": 11356
} | class ____(BarcodeProvider):
"""Implement barcode provider for ``en_US`` locale.
Sources:
- https://gs1.org/standards/id-keys/company-prefix
"""
local_prefixes = (
*product((0,), range(10)),
*product((1,), range(4)),
)
upc_e_base_pattern: Pattern = re.compile(r"^\d{6}$")
upc_ae_pattern1: Pattern = re.compile(
r"^(?P<number_system_digit>[01])" # The first digit must be 0 or 1
r"(?=\d{11}$)" # followed by 11 digits of which
r"(?P<mfr_code>\d{2})" # the first 2 digits make up the manufacturer code,
r"(?:(?P<extra>[012])0{4})" # if immediately followed by 00000, 10000, or 20000,
r"(?P<product_code>\d{3})" # a 3-digit product code,
r"(?P<check_digit>\d)$", # and finally a check digit.
)
upc_ae_pattern2: Pattern = re.compile(
r"^(?P<number_system_digit>[01])" # The first digit must be 0 or 1
r"(?=\d{11}$)" # followed by 11 digits of which
r"(?P<mfr_code>\d{3,4}?)" # the first 3 or 4 digits make up the manufacturer code,
r"(?:0{5})" # if immediately followed by 00000,
r"(?P<product_code>\d{1,2})" # a 2-digit or single digit product code,
r"(?P<check_digit>\d)$", # and finally a check digit.
)
upc_ae_pattern3: Pattern = re.compile(
r"^(?P<number_system_digit>[01])" # The first digit must be 0 or 1
r"(?=\d{11}$)" # followed by 11 digits of which
r"(?P<mfr_code>\d{5})" # the first 5 digits make up the manufacturer code,
r"(?:0{4}(?P<extra>[5-9]))" # if immediately followed by 0000 and a 5, 6, 7, 8, or 9,
r"(?P<check_digit>\d)$", # and finally a check digit.
)
def ean13(self, prefixes: PrefixType = (), leading_zero: Optional[bool] = None) -> str:
"""Generate an EAN-13 barcode.
If ``leading_zero`` is ``True``, the leftmost digit of the barcode will
be set to ``0``. If ``False``, the leftmost digit cannot be ``0``. If
``None`` (default), the leftmost digit can be any digit.
If a value for ``prefixes`` is specified, the result will begin with one
of the sequences in ``prefixes`` and will ignore ``leading_zero``.
This method uses the standard barcode provider's |ean13| under the
hood with the ``prefixes`` argument set to the correct value to attain
the behavior described above.
.. note::
EAN-13 barcode that starts with a zero can be converted to UPC-A
by dropping the leading zero. This may cause problems with readers
that treat all of these code as UPC-A codes and drop the first digit
when reading it.
You can set the argument ``prefixes`` ( or ``leading_zero`` for
convenience) explicitly to avoid or to force the generated barcode to
start with a zero. You can also generate actual UPC-A barcode with
|EnUsBarcodeProvider.upc_a|.
:sample:
:sample: leading_zero=False
:sample: leading_zero=True
:sample: prefixes=('00',)
:sample: prefixes=('45', '49')
"""
if not prefixes:
if leading_zero is True:
prefixes = ((0,),)
elif leading_zero is False:
prefixes = ((self.random_int(1, 9),),)
return super().ean13(prefixes=prefixes)
def _convert_upc_a2e(self, upc_a: str) -> str:
"""Convert a 12-digit UPC-A barcode to its 8-digit UPC-E equivalent.
.. warning::
Not all UPC-A barcodes can be converted.
"""
if not isinstance(upc_a, str):
raise TypeError("`upc_a` is not a string")
m1 = self.upc_ae_pattern1.match(upc_a)
m2 = self.upc_ae_pattern2.match(upc_a)
m3 = self.upc_ae_pattern3.match(upc_a)
if not any([m1, m2, m3]):
raise ValueError("`upc_a` has an invalid value")
upc_e_template = "{number_system_digit}{mfr_code}{product_code}{extra}{check_digit}"
if m1:
upc_e = upc_e_template.format(**m1.groupdict())
elif m2:
groupdict: Dict[str, str] = m2.groupdict()
mfr_code = groupdict.get("mfr_code") or ""
groupdict["extra"] = str(len(mfr_code))
upc_e = upc_e_template.format(**groupdict)
elif m3:
groupdict = m3.groupdict()
groupdict["product_code"] = ""
upc_e = upc_e_template.format(**groupdict)
return upc_e
def _upc_ae(self, base: Optional[str] = None, number_system_digit: Optional[int] = None) -> str:
"""Create a 12-digit UPC-A barcode that can be converted to UPC-E.
The expected value of ``base`` is a 6-digit string. If any other value
is provided, this method will use a random 6-digit string instead.
The expected value of ``number_system_digit`` is the integer ``0`` or
``1``. If any other value is provided, this method will randomly choose
from the two.
Please also view notes on |EnUsBarcodeProvider.upc_a| and
|EnUsBarcodeProvider.upc_e| for more details.
"""
base_ = (
[int(x) for x in base]
if isinstance(base, str) and self.upc_e_base_pattern.match(base)
else [self.random_int(0, 9) for _ in range(6)]
)
if number_system_digit not in [0, 1]:
number_system_digit = self.random_int(0, 1)
if base_[-1] <= 2:
code = base_[:2] + base_[-1:] + [0] * 4 + base_[2:-1]
elif base_[-1] <= 4:
code = base_[: base_[-1]] + [0] * 5 + base_[base_[-1] : -1]
else:
code = base_[:5] + [0] * 4 + base_[-1:]
code.insert(0, number_system_digit)
weights = [3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3]
weighted_sum = sum(x * y for x, y in zip(code, weights))
check_digit = (10 - weighted_sum % 10) % 10
code.append(check_digit)
return "".join(str(x) for x in code)
def upc_a(
self,
upc_ae_mode: bool = False,
base: Optional[str] = None,
number_system_digit: Optional[int] = None,
) -> str:
"""Generate a 12-digit UPC-A barcode.
The value of ``upc_ae_mode`` controls how barcodes will be generated. If
``False`` (default), barcodes are not guaranteed to have a UPC-E
equivalent. In this mode, the method uses |EnUsBarcodeProvider.ean13|
under the hood, and the values of ``base`` and ``number_system_digit``
will be ignored.
If ``upc_ae_mode`` is ``True``, the resulting barcodes are guaranteed to
have a UPC-E equivalent, and the values of ``base`` and
``number_system_digit`` will be used to control what is generated.
Under this mode, ``base`` is expected to have a 6-digit string value. If
any other value is supplied, a random 6-digit string will be used
instead. As for ``number_system_digit``, the expected value is a ``0``
or a ``1``. If any other value is provided, this method will randomly
choose from the two.
.. important::
When ``upc_ae_mode`` is enabled, you might encounter instances where
different values of ``base`` (e.g. ``'120003'`` and ``'120004'``)
produce the same UPC-A barcode. This is normal, and the reason lies
within the whole conversion process. To learn more about this and
what ``base`` and ``number_system_digit`` actually represent, please
refer to |EnUsBarcodeProvider.upc_e|.
:sample:
:sample: upc_ae_mode=True, number_system_digit=0
:sample: upc_ae_mode=True, number_system_digit=1
:sample: upc_ae_mode=True, base='123456', number_system_digit=0
:sample: upc_ae_mode=True, base='120003', number_system_digit=0
:sample: upc_ae_mode=True, base='120004', number_system_digit=0
"""
if upc_ae_mode is True:
return self._upc_ae(base=base, number_system_digit=number_system_digit)
else:
ean13 = self.ean13(leading_zero=True)
return ean13[1:]
def upc_e(
self,
base: Optional[str] = None,
number_system_digit: Optional[int] = None,
safe_mode: bool = True,
) -> str:
"""Generate an 8-digit UPC-E barcode.
UPC-E barcodes can be expressed in 6, 7, or 8-digit formats, but this
method uses the 8 digit format, since it is trivial to convert to the
other two formats. The first digit (starting from the left) is
controlled by ``number_system_digit``, and it can only be a ``0`` or a
``1``. The last digit is the check digit that is inherited from the
UPC-E barcode's UPC-A equivalent. The middle six digits are collectively
referred to as the ``base`` (for a lack of a better term).
On that note, this method uses ``base`` and ``number_system_digit`` to
first generate a UPC-A barcode for the check digit, and what happens
next depends on the value of ``safe_mode``. The argument ``safe_mode``
exists, because there are some UPC-E values that share the same UPC-A
equivalent. For example, any UPC-E barcode of the form ``abc0000d``,
``abc0003d``, and ``abc0004d`` share the same UPC-A value
``abc00000000d``, but that UPC-A value will only convert to ``abc0000d``
because of (a) how UPC-E is just a zero-suppressed version of UPC-A and
(b) the rules around the conversion.
If ``safe_mode`` is ``True`` (default), this method performs another set
of conversions to guarantee that the UPC-E barcodes generated can be
converted to UPC-A, and that UPC-A barcode can be converted back to the
original UPC-E barcode. Using the example above, even if the bases
``120003`` or ``120004`` are used, the resulting UPC-E barcode will
always use the base ``120000``.
If ``safe_mode`` is ``False``, then the ``number_system_digit``,
``base``, and the computed check digit will just be concatenated
together to produce the UPC-E barcode, and attempting to convert the
barcode to UPC-A and back again to UPC-E will exhibit the behavior
described above.
:sample:
:sample: base='123456'
:sample: base='123456', number_system_digit=0
:sample: base='123456', number_system_digit=1
:sample: base='120000', number_system_digit=0
:sample: base='120003', number_system_digit=0
:sample: base='120004', number_system_digit=0
:sample: base='120000', number_system_digit=0, safe_mode=False
:sample: base='120003', number_system_digit=0, safe_mode=False
:sample: base='120004', number_system_digit=0, safe_mode=False
"""
if safe_mode is not False:
upc_ae = self._upc_ae(base=base, number_system_digit=number_system_digit)
return self._convert_upc_a2e(upc_ae)
else:
upc_ae = self._upc_ae(base=base, number_system_digit=number_system_digit)
return upc_ae[0] + "".join(str(x) for x in base or "") + upc_ae[-1]
| Provider |
python | kamyu104__LeetCode-Solutions | Python/count-pairs-with-xor-in-a-range.py | {
"start": 783,
"end": 1611
} | class ____(object):
def __init__(self):
self.__root = {}
def insert(self, num):
node = self.__root
for i in reversed(xrange(32)):
curr = (num>>i) & 1
if curr not in node:
node[curr] = {"_count":0}
node = node[curr]
node["_count"] += 1
def query(self, num, limit):
node, result = self.__root, 0
for i in reversed(xrange(32)):
curr = (num>>i) & 1
bit = (limit>>i) & 1
if bit:
if curr in node:
result += node[0^curr]["_count"] # current limit is xxxxx1*****, count xor pair with xxxxx0***** pattern
if bit^curr not in node:
break
node = node[bit^curr]
return result
| Trie |
python | django__django | tests/signals/tests.py | {
"start": 17847,
"end": 17967
} | class ____:
param = 0
def __call__(self, **kwargs):
self.param += 1
return self.param
| SyncHandler |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/dropbox/tests.py | {
"start": 260,
"end": 1373
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = DropboxOAuth2Provider.id
def get_mocked_response(self):
payload = {
"account_id": "dbid:ASDFasd3ASdfasdFAsd1AS2ASDF1aS-DfAs",
"account_type": {".tag": "basic"},
"country": "US",
"disabled": False,
"email": "allauth@example.com",
"email_verified": True,
"is_paired": True,
"locale": "en",
"name": {
"abbreviated_name": "AA",
"display_name": "All Auth",
"familiar_name": "All",
"given_name": "All",
"surname": "Auth",
},
"profile_photo_url": (
"https://dl-web.dropbox.com/account_photo"
"/get/dbid%ASDFasd3ASdfasdFAsd1AS2ASDF1aS"
"-DfAs?size=128x128"
),
"referral_link": "https://db.tt/ASDfAsDf",
}
return MockedResponse(HTTPStatus.OK, json.dumps(payload))
def get_expected_to_str(self):
return "allauth@example.com"
| DropboxOAuth2Tests |
python | apache__airflow | dev/breeze/tests/test_ui_commands.py | {
"start": 6875,
"end": 7253
} | class ____:
def test_locale_key_set_with_keys(self):
lks = LocaleKeySet(locale="en", keys={"key1", "key2"})
assert lks.locale == "en"
assert lks.keys == {"key1", "key2"}
def test_locale_key_set_without_keys(self):
lks = LocaleKeySet(locale="de", keys=None)
assert lks.locale == "de"
assert lks.keys is None
| TestLocaleKeySet |
python | tensorflow__tensorflow | tensorflow/python/training/saving/saveable_object.py | {
"start": 2100,
"end": 3369
} | class ____:
"""Base class for saving and restoring saveable objects."""
def __init__(self, op, specs, name):
"""Creates a `SaveableObject` object.
Args:
op: the "producer" object that this class wraps; it produces a list of
tensors to save. E.g., a "Variable" object saving its backing tensor.
specs: a list of SaveSpec, each element of which describes one tensor to
save under this object. All Tensors must be on the same device.
name: the name to save the object under.
"""
self.op = op
self.specs = specs
self.name = name
@property
def device(self):
"""The device for SaveSpec Tensors."""
return self.specs[0].device
def restore(self, restored_tensors, restored_shapes):
"""Restores this object from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint
restored_shapes: the shapes this object should conform to after
restore, or None.
Returns:
An operation that restores the state of the object.
Raises:
ValueError: If the object cannot be restored using the provided
parameters.
"""
# pylint: disable=unused-argument
raise ValueError("Calling an abstract method.")
| SaveableObject |
python | Netflix__metaflow | metaflow/datastore/datastore_set.py | {
"start": 345,
"end": 2317
} | class ____(object):
def __init__(
self,
flow_datastore,
run_id,
steps=None,
pathspecs=None,
prefetch_data_artifacts=None,
allow_not_done=False,
join_type=None,
orig_flow_datastore=None,
spin_artifacts=None,
):
self.task_datastores = flow_datastore.get_task_datastores(
run_id,
steps=steps,
pathspecs=pathspecs,
allow_not_done=allow_not_done,
join_type=join_type,
orig_flow_datastore=orig_flow_datastore,
spin_artifacts=spin_artifacts,
)
if prefetch_data_artifacts:
# produce a set of SHA keys to prefetch based on artifact names
prefetch = set()
for ds in self.task_datastores:
prefetch.update(ds.keys_for_artifacts(prefetch_data_artifacts))
# ignore missing keys
prefetch.discard(None)
# prefetch artifacts and share them with all datastores
# in this DatastoreSet
preloaded = dict(flow_datastore.ca_store.load_blobs(prefetch))
cache = ImmutableBlobCache(preloaded)
flow_datastore.ca_store.set_blob_cache(cache)
self.pathspec_index_cache = {}
self.pathspec_cache = {}
if not allow_not_done:
for ds in self.task_datastores:
self.pathspec_index_cache[ds.pathspec_index] = ds
self.pathspec_cache[ds.pathspec] = ds
def get_with_pathspec(self, pathspec):
return self.pathspec_cache.get(pathspec, None)
def get_with_pathspec_index(self, pathspec_index):
return self.pathspec_index_cache.get(pathspec_index, None)
def __iter__(self):
for v in self.task_datastores:
yield v
"""
This class ensures that blobs that correspond to artifacts that
are common to all datastores in this set are only loaded once
"""
| TaskDataStoreSet |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 37011,
"end": 37517
} | class ____(_TestBasicOps, __TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
super().setUp()
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
| TestBasicOpsSingleton |
python | pydata__xarray | xarray/tests/test_tutorial.py | {
"start": 173,
"end": 835
} | class ____:
def test_download_from_github(self, tmp_path) -> None:
cache_dir = tmp_path / tutorial._default_cache_dir_name
ds = tutorial.load_dataset("tiny", cache_dir=cache_dir)
tiny = DataArray(range(5), name="tiny").to_dataset()
assert_identical(ds, tiny)
def test_download_from_github_load_without_cache(self, tmp_path) -> None:
cache_dir = tmp_path / tutorial._default_cache_dir_name
ds_nocache = tutorial.load_dataset("tiny", cache=False, cache_dir=cache_dir)
ds_cache = tutorial.load_dataset("tiny", cache_dir=cache_dir)
assert_identical(ds_cache, ds_nocache)
@network
| TestLoadDataset |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py | {
"start": 2089,
"end": 10454
} | class ____(TestCase):
def setUp(self) -> None:
# For testing, the default parameters used are for tcp. If a test
# uses parameters for file store, we set the self._params to
# self._params_filestore.
port = get_free_port()
self._params = RendezvousParameters(
backend="dummy_backend",
endpoint=f"localhost:{port}",
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
is_host="true",
store_type="tCp",
read_timeout="10",
)
_, tmp_path = tempfile.mkstemp()
# Parameters for filestore testing.
self._params_filestore = RendezvousParameters(
backend="dummy_backend",
endpoint=tmp_path,
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
store_type="fIlE",
)
self._expected_endpoint_file = tmp_path
self._expected_temp_dir = tempfile.gettempdir()
self._expected_endpoint_host = "localhost"
self._expected_endpoint_port = port
self._expected_store_type = TCPStore
self._expected_read_timeout = timedelta(seconds=10)
def tearDown(self) -> None:
os.remove(self._expected_endpoint_file)
def _run_test_with_store(self, store_type: str, test_to_run: Callable):
"""
Use this function to specify the store type to use in a test. If
not used, the test will default to TCPStore.
"""
if store_type == "file":
self._params = self._params_filestore
self._expected_store_type = FileStore
self._expected_read_timeout = timedelta(seconds=300)
test_to_run()
def _assert_create_backend_returns_backend(self) -> None:
backend, store = create_backend(self._params)
self.assertEqual(backend.name, "c10d")
self.assertIsInstance(store, self._expected_store_type)
typecast_store = cast(self._expected_store_type, store)
self.assertEqual(typecast_store.timeout, self._expected_read_timeout) # type: ignore[attr-defined]
if self._expected_store_type == TCPStore:
self.assertEqual(typecast_store.host, self._expected_endpoint_host) # type: ignore[attr-defined]
self.assertEqual(typecast_store.port, self._expected_endpoint_port) # type: ignore[attr-defined]
if self._expected_store_type == FileStore:
if self._params.endpoint:
self.assertEqual(typecast_store.path, self._expected_endpoint_file) # type: ignore[attr-defined]
else:
self.assertTrue(typecast_store.path.startswith(self._expected_temp_dir)) # type: ignore[attr-defined]
backend.set_state(b"dummy_state")
state = store.get("torch.rendezvous." + self._params.run_id)
self.assertEqual(state, b64encode(b"dummy_state"))
def test_create_backend_returns_backend(self) -> None:
for store_type in ["tcp", "file"]:
with self.subTest(store_type=store_type):
self._run_test_with_store(
store_type, self._assert_create_backend_returns_backend
)
def test_create_backend_returns_backend_if_is_host_is_false(self) -> None:
TCPStore( # type: ignore[call-arg]
self._expected_endpoint_host, self._expected_endpoint_port, is_master=True
)
self._params.config["is_host"] = "false"
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_is_host_is_not_specified(self) -> None:
del self._params.config["is_host"]
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_is_host_is_not_specified_and_store_already_exists(
self,
) -> None:
TCPStore( # type: ignore[call-arg]
self._expected_endpoint_host, self._expected_endpoint_port, is_master=True
)
del self._params.config["is_host"]
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_endpoint_port_is_not_specified(
self,
) -> None:
# patch default port and pass endpoint with no port specified
with mock.patch(
"torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.DEFAULT_PORT",
self._expected_endpoint_port,
):
self._params.endpoint = self._expected_endpoint_host
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_endpoint_file_is_not_specified(
self,
) -> None:
self._params_filestore.endpoint = ""
self._run_test_with_store("file", self._assert_create_backend_returns_backend)
def test_create_backend_returns_backend_if_store_type_is_not_specified(
self,
) -> None:
del self._params.config["store_type"]
self._expected_store_type = TCPStore
if not self._params.get("read_timeout"):
self._expected_read_timeout = timedelta(seconds=60)
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_read_timeout_is_not_specified(
self,
) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = timedelta(seconds=60)
self._assert_create_backend_returns_backend()
def test_create_backend_raises_error_if_store_is_unreachable(self) -> None:
self._params.config["is_host"] = "false"
self._params.config["read_timeout"] = "2"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to the C10d store has failed. See inner exception for details.$",
):
create_backend(self._params)
def test_create_backend_raises_error_if_endpoint_is_invalid(self) -> None:
for is_host in [True, False]:
with self.subTest(is_host=is_host):
self._params.config["is_host"] = str(is_host)
self._params.endpoint = "dummy_endpoint"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to the C10d store has failed. See inner exception for "
r"details.$",
):
create_backend(self._params)
def test_create_backend_raises_error_if_store_type_is_invalid(self) -> None:
self._params.config["store_type"] = "dummy_store_type"
with self.assertRaisesRegex(
ValueError,
r"^Invalid store type given. Currently only supports file and tcp.$",
):
create_backend(self._params)
def test_create_backend_raises_error_if_read_timeout_is_invalid(self) -> None:
for read_timeout in ["0", "-10"]:
with self.subTest(read_timeout=read_timeout):
self._params.config["read_timeout"] = read_timeout
with self.assertRaisesRegex(
ValueError, r"^The read timeout must be a positive integer.$"
):
create_backend(self._params)
@mock.patch("tempfile.mkstemp")
def test_create_backend_raises_error_if_tempfile_creation_fails(
self, tempfile_mock
) -> None:
tempfile_mock.side_effect = OSError("test error")
# Set the endpoint to empty so it defaults to creating a temp file
self._params_filestore.endpoint = ""
with self.assertRaisesRegex(
RendezvousError,
r"The file creation for C10d store has failed. See inner exception for details.",
):
create_backend(self._params_filestore)
@mock.patch(
"torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.FileStore"
)
def test_create_backend_raises_error_if_file_path_is_invalid(
self, filestore_mock
) -> None:
filestore_mock.side_effect = RuntimeError("test error")
self._params_filestore.endpoint = "bad file path"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to the C10d store has failed. See inner exception for "
r"details.$",
):
create_backend(self._params_filestore)
| CreateBackendTest |
python | openai__openai-python | src/openai/types/beta/file_search_tool.py | {
"start": 626,
"end": 1555
} | class ____(BaseModel):
max_num_results: Optional[int] = None
"""The maximum number of results the file search tool should output.
The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
should be between 1 and 50 inclusive.
Note that the file search tool may output fewer than `max_num_results` results.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
ranking_options: Optional[FileSearchRankingOptions] = None
"""The ranking options for the file search.
If not specified, the file search tool will use the `auto` ranker and a
score_threshold of 0.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
| FileSearch |
python | getsentry__sentry | src/sentry/http.py | {
"start": 1131,
"end": 1399
} | class ____(Exception):
error_type = EventError.UNKNOWN_ERROR
def __init__(self, data=None):
if data is None:
data = {}
data.setdefault("type", self.error_type)
super().__init__(data["type"])
self.data = data
| BadSource |
python | pypa__hatch | src/hatch/config/model.py | {
"start": 557,
"end": 1144
} | class ____:
def __init__(self, config: dict, steps: tuple = ()):
self.raw_data = config
self.steps = steps
def parse_fields(self):
for attribute in self.__dict__:
_, prefix, name = attribute.partition("_field_")
if prefix:
parse_config(getattr(self, name))
def raise_error(self, message, *, extra_steps=()):
import inspect
field = inspect.currentframe().f_back.f_code.co_name
raise ConfigurationError(message, location=" -> ".join([*self.steps, field, *extra_steps]))
| LazilyParsedConfig |
python | huggingface__transformers | src/transformers/models/donut/modeling_donut_swin.py | {
"start": 1545,
"end": 2593
} | class ____(ModelOutput):
r"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
DonutSwin model's outputs that also contains a pooling of the last hidden states.
"""
)
# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin
| DonutSwinEncoderOutput |
python | tensorflow__tensorflow | tensorflow/python/framework/versions_test.py | {
"start": 830,
"end": 1990
} | class ____(test.TestCase):
def testVersion(self):
self.assertEqual(type(versions.__version__), str)
self.assertEqual(type(versions.VERSION), str)
# This pattern will need to grow as we include alpha, builds, etc.
self.assertRegex(
versions.__version__, r'^\d+\.\d+\.(\d+(\-\w+)?(\+\w+)?|head)$'
)
self.assertRegex(
versions.VERSION, r'^\d+\.\d+\.(\d+(\-\w+)?(\+\w+)?|head)$'
)
def testGraphDefVersion(self):
version = versions.GRAPH_DEF_VERSION
min_consumer = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
min_producer = versions.GRAPH_DEF_VERSION_MIN_PRODUCER
for v in version, min_consumer, min_producer:
self.assertEqual(type(v), int)
self.assertLessEqual(0, min_consumer)
self.assertLessEqual(0, min_producer)
self.assertLessEqual(min_producer, version)
def testGitAndCompilerVersion(self):
self.assertEqual(type(versions.__git_version__), str)
self.assertEqual(type(versions.__compiler_version__), str)
self.assertEqual(type(versions.GIT_VERSION), str)
self.assertEqual(type(versions.COMPILER_VERSION), str)
if __name__ == '__main__':
test.main()
| VersionTest |
python | doocs__leetcode | solution/2700-2799/2762.Continuous Subarrays/Solution.py | {
"start": 0,
"end": 308
} | class ____:
def continuousSubarrays(self, nums: List[int]) -> int:
ans = i = 0
sl = SortedList()
for x in nums:
sl.add(x)
while sl[-1] - sl[0] > 2:
sl.remove(nums[i])
i += 1
ans += len(sl)
return ans
| Solution |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 162442,
"end": 163700
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.outer(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [1])
x2_shape = getattr(x2, "shape", [1])
if None in x1_shape:
x1_flatten_shape = None
else:
x1_flatten_shape = int(np.prod(x1_shape))
if None in x2_shape:
x2_flatten_shape = None
else:
x2_flatten_shape = int(np.prod(x2_shape))
output_shape = [x1_flatten_shape, x2_flatten_shape]
output_dtype = backend.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
return KerasTensor(output_shape, dtype=output_dtype)
@keras_export(["keras.ops.outer", "keras.ops.numpy.outer"])
def outer(x1, x2):
"""Compute the outer product of two vectors.
Given two vectors `x1` and `x2`, the outer product is:
```
out[i, j] = x1[i] * x2[j]
```
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Outer product of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Outer().symbolic_call(x1, x2)
return backend.numpy.outer(x1, x2)
| Outer |
python | django__django | tests/invalid_models_tests/test_relative_fields.py | {
"start": 61487,
"end": 64797
} | class ____(SimpleTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model", models.CASCADE)
self.assertEqual(
Model.check(),
[
Error(
"Reverse accessor 'Model.model_set' for "
"'invalid_models_tests.Model.model_set' clashes with field "
"name 'invalid_models_tests.Model.model_set'.",
hint=(
"Rename field 'invalid_models_tests.Model.model_set', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.model_set'."
),
obj=Model._meta.get_field("model_set"),
id="fields.E302",
),
],
)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model", models.CASCADE)
self.assertEqual(
Model.check(),
[
Error(
"Reverse query name for 'invalid_models_tests.Model.model' "
"clashes with field name 'invalid_models_tests.Model.model'.",
hint=(
"Rename field 'invalid_models_tests.Model.model', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.model'."
),
obj=Model._meta.get_field("model"),
id="fields.E303",
),
],
)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", models.CASCADE, related_name="clash")
self.assertEqual(
Model.check(),
[
Error(
"Reverse accessor 'Model.clash' for "
"'invalid_models_tests.Model.foreign' clashes with field name "
"'invalid_models_tests.Model.clash'.",
hint=(
"Rename field 'invalid_models_tests.Model.clash', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.foreign'."
),
obj=Model._meta.get_field("foreign"),
id="fields.E302",
),
Error(
"Reverse query name for 'invalid_models_tests.Model.foreign' "
"clashes with field name 'invalid_models_tests.Model.clash'.",
hint=(
"Rename field 'invalid_models_tests.Model.clash', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.foreign'."
),
obj=Model._meta.get_field("foreign"),
id="fields.E303",
),
],
)
@isolate_apps("invalid_models_tests")
| SelfReferentialFKClashTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 66171,
"end": 66583
} | class ____(GenericFunction[_T]):
r"""Implement the ``ROLLUP`` grouping operation.
This function is used as part of the GROUP BY of a statement,
e.g. :meth:`_expression.Select.group_by`::
stmt = select(
func.sum(table.c.value), table.c.col_1, table.c.col_2
).group_by(func.rollup(table.c.col_1, table.c.col_2))
"""
_has_args = True
inherit_cache = True
| rollup |
python | PrefectHQ__prefect | tests/runtime/test_flow_run.py | {
"start": 20808,
"end": 22805
} | class ____:
@pytest.mark.parametrize("url_type", ["api_url", "ui_url"])
async def test_url_is_attribute(self, url_type: str):
assert url_type in dir(flow_run)
@pytest.mark.parametrize("url_type", ["api_url", "ui_url"])
async def test_url_is_none_when_id_not_set(self, url_type: str):
assert getattr(flow_run, url_type) is None
@pytest.mark.parametrize(
"url_type,",
["api_url", "ui_url"],
)
async def test_url_returns_correct_url_when_id_present(
self,
url_type: str,
):
test_id = "12345"
if url_type == "api_url":
base_url_value = PREFECT_API_URL.value()
elif url_type == "ui_url":
base_url_value = PREFECT_UI_URL.value()
else:
raise ValueError(f"Invalid url_type: {url_type}")
expected_url = f"{base_url_value}/flow-runs/flow-run/{test_id}"
with FlowRunContext.model_construct(
flow_run=FlowRun.model_construct(id=test_id)
):
assert getattr(flow_run, url_type) == expected_url
assert not getattr(flow_run, url_type)
@pytest.mark.parametrize(
"url_type,",
["api_url", "ui_url"],
)
async def test_url_pulls_from_api_when_needed(
self,
monkeypatch: pytest.MonkeyPatch,
prefect_client: PrefectClient,
url_type: str,
):
run = await prefect_client.create_flow_run(flow=flow(lambda: None, name="test"))
assert not getattr(flow_run, url_type)
if url_type == "api_url":
base_url_value = PREFECT_API_URL.value()
elif url_type == "ui_url":
base_url_value = PREFECT_UI_URL.value()
else:
raise ValueError(f"Invalid url_type: {url_type}")
expected_url = f"{base_url_value}/flow-runs/flow-run/{str(run.id)}"
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(run.id))
assert getattr(flow_run, url_type) == expected_url
| TestURL |
python | gevent__gevent | src/gevent/tests/test__server_pywsgi.py | {
"start": 2624,
"end": 2718
} | class ____(test__server.TestSSLSocketNotAllowed):
Settings = Settings
| TestSSLSocketNotAllowed |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1571680,
"end": 1572832
} | class ____(sgqlc.types.Type, Node):
"""An edit on user content"""
__schema__ = github_schema
__field_names__ = ("created_at", "deleted_at", "deleted_by", "diff", "edited_at", "editor", "updated_at")
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
deleted_at = sgqlc.types.Field(DateTime, graphql_name="deletedAt")
"""Identifies the date and time when the object was deleted."""
deleted_by = sgqlc.types.Field(Actor, graphql_name="deletedBy")
"""The actor who deleted this content"""
diff = sgqlc.types.Field(String, graphql_name="diff")
"""A summary of the changes for this edit"""
edited_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="editedAt")
"""When this content was edited"""
editor = sgqlc.types.Field(Actor, graphql_name="editor")
"""The actor who edited this content"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| UserContentEdit |
python | pytorch__pytorch | test/lazy/test_generator.py | {
"start": 232,
"end": 3163
} | class ____(TestCase):
def test_generator(self):
"""
Test that generators are being inserted into the TorchScript
graph by setting different seeds before each call to
generate_tensor but the resulting tensor is the same
"""
def generate_tensor():
g1 = torch.Generator()
g1.manual_seed(2023)
t1 = torch.tensor(1.0)
t1.uniform_(generator=g1)
g2 = torch.Generator()
g2.manual_seed(2024)
t2 = torch.tensor(1.0)
t2.normal_(generator=g2)
return t1, t2
torch.manual_seed(1)
with torch.device("cpu"):
cpu_t1, cpu_t2 = generate_tensor()
torch.manual_seed(2)
with torch.device("lazy"):
lazy_t1, lazy_t2 = generate_tensor()
torch._lazy.mark_step()
assert torch.allclose(cpu_t1, lazy_t1.to("cpu")), (
f"Expected {cpu_t1}, got {lazy_t1.to('cpu')}"
)
assert torch.allclose(cpu_t2, lazy_t2.to("cpu")), (
f"Expected {cpu_t2}, got {lazy_t2.to('cpu')}"
)
@skipIfTorchDynamo("Torch Dynamo does not support torch.Generator type")
def test_generator_causes_multiple_compiles(self):
"""
Test that inserting generators with different seed caused recompile
"""
def generate_tensor(seed):
t = torch.tensor(1.0)
g = torch.Generator()
g.manual_seed(seed)
t.uniform_(-1, 1, generator=g)
return t
metrics.reset()
with torch.device("lazy"):
t = generate_tensor(1)
torch._lazy.mark_step()
uncached_compile = metrics.counter_value("UncachedCompile")
assert uncached_compile == 1, (
f"Expected 1 uncached compiles, got {uncached_compile}"
)
t = generate_tensor(2)
torch._lazy.mark_step()
uncached_compile = metrics.counter_value("UncachedCompile")
assert uncached_compile == 2, (
f"Expected 2 uncached compiles, got {uncached_compile}"
)
t = generate_tensor(1) # noqa: F841
torch._lazy.mark_step()
uncached_compile = metrics.counter_value("UncachedCompile")
assert uncached_compile == 2, (
f"Expected 2 uncached compiles, got {uncached_compile}"
)
cached_compile = metrics.counter_value("CachedCompile")
assert cached_compile == 1, (
f"Expected 1 cached compile, got {cached_compile}"
)
metrics.reset()
latest_graph = torch._C._lazy_ts_backend._get_latest_computation_graph()
assert 'torch.Generator(device="cpu", seed=1)' in latest_graph
assert "aten::uniform" in latest_graph
if __name__ == "__main__":
run_tests()
| LazyGeneratorTest |
python | crytic__slither | slither/core/expressions/member_access.py | {
"start": 112,
"end": 872
} | class ____(Expression):
def __init__(self, member_name: str, member_type: str, expression: Expression) -> None:
# assert isinstance(member_type, Type)
# TODO member_type is not always a Type
assert isinstance(expression, Expression)
super().__init__()
self._type: Type = member_type
self._member_name: str = member_name
self._expression: Expression = expression
@property
def expression(self) -> Expression:
return self._expression
@property
def member_name(self) -> str:
return self._member_name
@property
def type(self) -> Type:
return self._type
def __str__(self) -> str:
return str(self.expression) + "." + self.member_name
| MemberAccess |
python | doocs__leetcode | solution/1900-1999/1984.Minimum Difference Between Highest and Lowest of K Scores/Solution.py | {
"start": 0,
"end": 181
} | class ____:
def minimumDifference(self, nums: List[int], k: int) -> int:
nums.sort()
return min(nums[i + k - 1] - nums[i] for i in range(len(nums) - k + 1))
| Solution |
python | astropy__astropy | astropy/table/bst.py | {
"start": 1921,
"end": 3452
} | class ____:
"""
An element in a binary search tree, containing
a key, data, and references to children nodes and
a parent node.
Parameters
----------
key : tuple
Node key
data : list or int
Node data
"""
__lt__ = lambda x, y: x.key < y.key
__le__ = lambda x, y: x.key <= y.key
__eq__ = lambda x, y: x.key == y.key
__ge__ = lambda x, y: x.key >= y.key
__gt__ = lambda x, y: x.key > y.key
__ne__ = lambda x, y: x.key != y.key
__slots__ = ("data", "key", "left", "right")
# each node has a key and data list
def __init__(self, key, data):
self.key = key
self.data = data if isinstance(data, list) else [data]
self.left = None
self.right = None
def replace(self, child, new_child):
"""
Replace this node's child with a new child.
"""
if self.left is not None and self.left == child:
self.left = new_child
elif self.right is not None and self.right == child:
self.right = new_child
else:
raise ValueError("Cannot call replace() on non-child")
def remove(self, child):
"""
Remove the given child.
"""
self.replace(child, None)
def set(self, other):
"""
Copy the given node.
"""
self.key = other.key
self.data = other.data[:]
def __str__(self):
return str((self.key, self.data))
def __repr__(self):
return str(self)
| Node |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 8709,
"end": 10704
} | class ____(String):
"""A variable length Unicode string type.
The :class:`.Unicode` type is a :class:`.String` subclass that assumes
input and output strings that may contain non-ASCII characters, and for
some backends implies an underlying column type that is explicitly
supporting of non-ASCII data, such as ``NVARCHAR`` on Oracle Database and
SQL Server. This will impact the output of ``CREATE TABLE`` statements and
``CAST`` functions at the dialect level.
The character encoding used by the :class:`.Unicode` type that is used to
transmit and receive data to the database is usually determined by the
DBAPI itself. All modern DBAPIs accommodate non-ASCII strings but may have
different methods of managing database encodings; if necessary, this
encoding should be configured as detailed in the notes for the target DBAPI
in the :ref:`dialect_toplevel` section.
In modern SQLAlchemy, use of the :class:`.Unicode` datatype does not
imply any encoding/decoding behavior within SQLAlchemy itself. In Python
3, all string objects are inherently Unicode capable, and SQLAlchemy
does not produce bytestring objects nor does it accommodate a DBAPI that
does not return Python Unicode objects in result sets for string values.
.. warning:: Some database backends, particularly SQL Server with pyodbc,
are known to have undesirable behaviors regarding data that is noted
as being of ``NVARCHAR`` type as opposed to ``VARCHAR``, including
datatype mismatch errors and non-use of indexes. See the section
on :meth:`.DialectEvents.do_setinputsizes` for background on working
around unicode character issues for backends like SQL Server with
pyodbc as well as cx_Oracle.
.. seealso::
:class:`.UnicodeText` - unlengthed textual counterpart
to :class:`.Unicode`.
:meth:`.DialectEvents.do_setinputsizes`
"""
__visit_name__ = "unicode"
| Unicode |
python | encode__starlette | starlette/middleware/cors.py | {
"start": 454,
"end": 7527
} | class ____:
def __init__(
self,
app: ASGIApp,
allow_origins: Sequence[str] = (),
allow_methods: Sequence[str] = ("GET",),
allow_headers: Sequence[str] = (),
allow_credentials: bool = False,
allow_origin_regex: str | None = None,
allow_private_network: bool = False,
expose_headers: Sequence[str] = (),
max_age: int = 600,
) -> None:
if "*" in allow_methods:
allow_methods = ALL_METHODS
compiled_allow_origin_regex = None
if allow_origin_regex is not None:
compiled_allow_origin_regex = re.compile(allow_origin_regex)
allow_all_origins = "*" in allow_origins
allow_all_headers = "*" in allow_headers
preflight_explicit_allow_origin = not allow_all_origins or allow_credentials
simple_headers: dict[str, str] = {}
if allow_all_origins:
simple_headers["Access-Control-Allow-Origin"] = "*"
if allow_credentials:
simple_headers["Access-Control-Allow-Credentials"] = "true"
if expose_headers:
simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
preflight_headers: dict[str, str] = {}
if preflight_explicit_allow_origin:
# The origin value will be set in preflight_response() if it is allowed.
preflight_headers["Vary"] = "Origin"
else:
preflight_headers["Access-Control-Allow-Origin"] = "*"
preflight_headers.update(
{
"Access-Control-Allow-Methods": ", ".join(allow_methods),
"Access-Control-Max-Age": str(max_age),
}
)
allow_headers = sorted(SAFELISTED_HEADERS | set(allow_headers))
if allow_headers and not allow_all_headers:
preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
if allow_credentials:
preflight_headers["Access-Control-Allow-Credentials"] = "true"
self.app = app
self.allow_origins = allow_origins
self.allow_methods = allow_methods
self.allow_headers = [h.lower() for h in allow_headers]
self.allow_all_origins = allow_all_origins
self.allow_all_headers = allow_all_headers
self.preflight_explicit_allow_origin = preflight_explicit_allow_origin
self.allow_origin_regex = compiled_allow_origin_regex
self.allow_private_network = allow_private_network
self.simple_headers = simple_headers
self.preflight_headers = preflight_headers
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] != "http": # pragma: no cover
await self.app(scope, receive, send)
return
method = scope["method"]
headers = Headers(scope=scope)
origin = headers.get("origin")
if origin is None:
await self.app(scope, receive, send)
return
if method == "OPTIONS" and "access-control-request-method" in headers:
response = self.preflight_response(request_headers=headers)
await response(scope, receive, send)
return
await self.simple_response(scope, receive, send, request_headers=headers)
def is_allowed_origin(self, origin: str) -> bool:
if self.allow_all_origins:
return True
if self.allow_origin_regex is not None and self.allow_origin_regex.fullmatch(origin):
return True
return origin in self.allow_origins
def preflight_response(self, request_headers: Headers) -> Response:
requested_origin = request_headers["origin"]
requested_method = request_headers["access-control-request-method"]
requested_headers = request_headers.get("access-control-request-headers")
requested_private_network = request_headers.get("access-control-request-private-network")
headers = dict(self.preflight_headers)
failures: list[str] = []
if self.is_allowed_origin(origin=requested_origin):
if self.preflight_explicit_allow_origin:
# The "else" case is already accounted for in self.preflight_headers
# and the value would be "*".
headers["Access-Control-Allow-Origin"] = requested_origin
else:
failures.append("origin")
if requested_method not in self.allow_methods:
failures.append("method")
# If we allow all headers, then we have to mirror back any requested
# headers in the response.
if self.allow_all_headers and requested_headers is not None:
headers["Access-Control-Allow-Headers"] = requested_headers
elif requested_headers is not None:
for header in [h.lower() for h in requested_headers.split(",")]:
if header.strip() not in self.allow_headers:
failures.append("headers")
break
if requested_private_network is not None:
if self.allow_private_network:
headers["Access-Control-Allow-Private-Network"] = "true"
else:
failures.append("private-network")
# We don't strictly need to use 400 responses here, since its up to
# the browser to enforce the CORS policy, but its more informative
# if we do.
if failures:
failure_text = "Disallowed CORS " + ", ".join(failures)
return PlainTextResponse(failure_text, status_code=400, headers=headers)
return PlainTextResponse("OK", status_code=200, headers=headers)
async def simple_response(self, scope: Scope, receive: Receive, send: Send, request_headers: Headers) -> None:
send = functools.partial(self.send, send=send, request_headers=request_headers)
await self.app(scope, receive, send)
async def send(self, message: Message, send: Send, request_headers: Headers) -> None:
if message["type"] != "http.response.start":
await send(message)
return
message.setdefault("headers", [])
headers = MutableHeaders(scope=message)
headers.update(self.simple_headers)
origin = request_headers["Origin"]
has_cookie = "cookie" in request_headers
# If request includes any cookie headers, then we must respond
# with the specific origin instead of '*'.
if self.allow_all_origins and has_cookie:
self.allow_explicit_origin(headers, origin)
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):
self.allow_explicit_origin(headers, origin)
await send(message)
@staticmethod
def allow_explicit_origin(headers: MutableHeaders, origin: str) -> None:
headers["Access-Control-Allow-Origin"] = origin
headers.add_vary_header("Origin")
| CORSMiddleware |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_sentry.py | {
"start": 2138,
"end": 2322
} | class ____:
pass
def is_configured(obj):
from airflow.sdk.execution_time.sentry.configured import ConfiguredSentry
return isinstance(obj, ConfiguredSentry)
| CustomTransport |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 35983,
"end": 38460
} | class ____(nn.Module):
def __init__(self, config):
"""ViT-like transformer block
Args:
config (`ZoeDepthConfig`):
Model configuration class defining the model architecture.
"""
super().__init__()
in_channels = config.bottleneck_features
self.transformer_encoder = nn.ModuleList(
[ZoeDepthTransformerEncoderLayer(config) for _ in range(config.num_patch_transformer_layers)]
)
self.embedding_convPxP = nn.Conv2d(
in_channels, config.patch_transformer_hidden_size, kernel_size=1, stride=1, padding=0
)
def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim, device="cpu", dtype=torch.float32):
"""Generate positional encodings
Args:
sequence_length (int): Sequence length
embedding_dim (int): Embedding dimension
Returns:
torch.Tensor: Positional encodings.
"""
position = torch.arange(0, sequence_length, dtype=dtype, device=device).unsqueeze(1)
index = torch.arange(0, embedding_dim, 2, dtype=dtype, device=device).unsqueeze(0)
div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
pos_encoding = position * div_term
pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
pos_encoding = pos_encoding.unsqueeze(dim=0).repeat(batch_size, 1, 1)
return pos_encoding
def forward(self, x):
"""Forward pass
Args:
x (torch.Tensor - NCHW): Input feature tensor
Returns:
torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)
"""
embeddings = self.embedding_convPxP(x).flatten(2) # shape (batch_size, num_channels, sequence_length)
# add an extra special CLS token at the start for global accumulation
embeddings = nn.functional.pad(embeddings, (1, 0))
embeddings = embeddings.permute(0, 2, 1)
batch_size, sequence_length, embedding_dim = embeddings.shape
embeddings = embeddings + self.positional_encoding_1d(
batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype
)
for i in range(4):
embeddings = self.transformer_encoder[i](embeddings)
return embeddings
| ZoeDepthPatchTransformerEncoder |
python | pytorch__pytorch | torch/distributed/checkpoint/examples/stateful_example.py | {
"start": 565,
"end": 2817
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.net1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())
self.net2 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())
self.net3 = nn.Linear(32, 64)
self.net4 = nn.Sequential(nn.ReLU(), nn.Linear(64, 8))
def forward(self, x):
return self.net4(self.net3(self.net2(self.net1(x))))
def get_input(self):
return torch.rand(8, 8, device="cuda")
def _make_stateful(model, optim):
_patch_model_state_dict(model)
_patch_optimizer_state_dict(model, optimizers=optim)
def _train(model, optim, train_steps=1):
torch.manual_seed(0)
loss = None
for _ in range(train_steps):
loss = model(model.get_input()).sum()
loss.backward()
optim.step()
optim.zero_grad()
return loss
def _init_model(device, world_size):
device_mesh = init_device_mesh(device, (world_size,))
model = Model().cuda()
model = FSDP(
model,
device_mesh=device_mesh,
use_orig_params=True,
)
optim = torch.optim.Adam(model.parameters(), lr=0.1)
_make_stateful(model, optim)
return model, optim
def run(rank, world_size, device="cuda"):
# Set up world pg
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
dist.init_process_group("cpu:gloo,cuda:nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
model, optim = _init_model(device, world_size)
_train(model, optim, train_steps=2)
dcp.save(
state_dict={"model": model, "optimizer": optim},
checkpoint_id=CHECKPOINT_DIR,
)
# presumably do something else
model, optim = _init_model(device, world_size)
dcp.load(
state_dict={"model": model, "optimizer": optim},
checkpoint_id=CHECKPOINT_DIR,
)
_train(model, optim, train_steps=2)
if __name__ == "__main__":
world_size = torch.cuda.device_count()
print(f"Running stateful checkpoint example on {world_size} devices.")
shutil.rmtree(CHECKPOINT_DIR, ignore_errors=True)
mp.spawn(
run,
args=(world_size,),
nprocs=world_size,
join=True,
)
| Model |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_container_instance.py | {
"start": 5573,
"end": 6902
} | class ____:
@patch("airflow.providers.microsoft.azure.hooks.container_instance.ContainerInstanceManagementClient")
@patch("azure.common.credentials.ServicePrincipalCredentials")
@patch("airflow.providers.microsoft.azure.hooks.container_instance.get_sync_default_azure_credential")
def test_get_conn_fallback_to_default_azure_credential(
self,
mock_default_azure_credential,
mock_service_pricipal_credential,
mock_client_cls,
connection_without_login_password_tenant_id,
):
mock_credential = MagicMock()
mock_default_azure_credential.return_value = mock_credential
mock_client_instance = MagicMock()
mock_client_cls.return_value = mock_client_instance
hook = AzureContainerInstanceHook(azure_conn_id=connection_without_login_password_tenant_id.conn_id)
conn = hook.get_conn()
mock_default_azure_credential.assert_called_with(
managed_identity_client_id=None, workload_identity_tenant_id=None
)
assert not mock_service_pricipal_credential.called
assert conn == mock_client_instance
mock_client_cls.assert_called_once_with(
credential=mock_credential,
subscription_id="subscription_id",
)
| TestAzureContainerInstanceHookWithoutSetupCredential |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_typed_mapping.py | {
"start": 6772,
"end": 38819
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
@testing.combinations(
"default", "insert_default", argnames="use_paramname"
)
@testing.combinations(True, False, argnames="use_none")
def test_col_defaults(self, use_paramname, use_none, decl_base):
class Foo(decl_base):
__tablename__ = "foo"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[int] = mapped_column(
**{use_paramname: None if use_none else 5}
)
if use_none:
assert not Foo.__table__.c.data.default
else:
eq_(Foo.__table__.c.data.default.arg, 5)
def test_type_inline_declaration(self, decl_base):
"""test #10899"""
class User(decl_base):
__tablename__ = "user"
class Role(enum.Enum):
admin = "admin"
user = "user"
id: Mapped[int] = mapped_column(primary_key=True)
role: Mapped[Role]
is_true(isinstance(User.__table__.c.role.type, Enum))
eq_(User.__table__.c.role.type.length, 5)
is_(User.__table__.c.role.type.enum_class, User.Role)
eq_(User.__table__.c.role.type.name, "role") # and not 'enum'
def test_type_uses_inner_when_present(self, decl_base):
"""test #10899, that we use inner name when appropriate"""
class Role(enum.Enum):
foo = "foo"
bar = "bar"
class User(decl_base):
__tablename__ = "user"
class Role(enum.Enum):
admin = "admin"
user = "user"
id: Mapped[int] = mapped_column(primary_key=True)
role: Mapped[Role]
is_true(isinstance(User.__table__.c.role.type, Enum))
eq_(User.__table__.c.role.type.length, 5)
is_(User.__table__.c.role.type.enum_class, User.Role)
eq_(User.__table__.c.role.type.name, "role") # and not 'enum'
def test_legacy_declarative_base(self):
typ = VARCHAR(50)
Base = declarative_base(type_annotation_map={str: typ})
class MyClass(Base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
x: Mapped[int]
is_(MyClass.__table__.c.data.type, typ)
is_true(MyClass.__table__.c.id.primary_key)
@testing.variation("style", ["none", "lambda_", "string", "direct"])
def test_foreign_annotation_propagates_correctly(self, decl_base, style):
"""test #10597"""
class Parent(decl_base):
__tablename__ = "parent"
id: Mapped[int] = mapped_column(primary_key=True)
class Child(decl_base):
__tablename__ = "child"
name: Mapped[str] = mapped_column(primary_key=True)
if style.none:
parent_id: Mapped[int] = mapped_column(ForeignKey("parent.id"))
else:
parent_id: Mapped[int] = mapped_column()
if style.lambda_:
parent: Mapped[Parent] = relationship(
primaryjoin=lambda: remote(Parent.id)
== foreign(Child.parent_id),
)
elif style.string:
parent: Mapped[Parent] = relationship(
primaryjoin="remote(Parent.id) == "
"foreign(Child.parent_id)",
)
elif style.direct:
parent: Mapped[Parent] = relationship(
primaryjoin=remote(Parent.id) == foreign(parent_id),
)
elif style.none:
parent: Mapped[Parent] = relationship()
assert Child.__mapper__.attrs.parent.strategy.use_get
def test_required_no_arg(self, decl_base):
with expect_raises_message(
sa_exc.ArgumentError,
r"Python typing annotation is required for attribute "
r'"A.data" when primary '
r'argument\(s\) for "MappedColumn" construct are None or '
r"not present",
):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data = mapped_column()
@testing.variation("case", ["key", "name", "both"])
@testing.variation("deferred", [True, False])
@testing.variation("use_add_property", [True, False])
def test_separate_name(self, decl_base, case, deferred, use_add_property):
if case.key:
args = {"key": "data_"}
elif case.name:
args = {"name": "data_"}
else:
args = {"name": "data_", "key": "data_"}
if deferred:
args["deferred"] = True
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
if not use_add_property:
data: Mapped[str] = mapped_column(**args)
if use_add_property:
args["type_"] = String()
A.data = mapped_column(**args)
assert not hasattr(A, "data_")
is_(A.data.property.expression, A.__table__.c.data_)
eq_(A.__table__.c.data_.key, "data_")
def test_construct_rhs(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id = mapped_column("id", Integer, primary_key=True)
name = mapped_column(String(50))
self.assert_compile(
select(User), "SELECT users.id, users.name FROM users"
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
def test_construct_lhs(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
data: Mapped[Optional[str]] = mapped_column()
self.assert_compile(
select(User), "SELECT users.id, users.name, users.data FROM users"
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
is_false(User.__table__.c.id.nullable)
is_false(User.__table__.c.name.nullable)
is_true(User.__table__.c.data.nullable)
def test_construct_lhs_omit_mapped_column(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
data: Mapped[Optional[str]]
x: Mapped[int]
y: Mapped[int]
created_at: Mapped[datetime.datetime]
self.assert_compile(
select(User),
"SELECT users.id, users.name, users.data, users.x, "
"users.y, users.created_at FROM users",
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
is_false(User.__table__.c.id.nullable)
is_false(User.__table__.c.name.nullable)
is_true(User.__table__.c.data.nullable)
assert isinstance(User.__table__.c.created_at.type, DateTime)
def test_i_have_a_classvar_on_my_class(self, decl_base):
class MyClass(decl_base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
status: ClassVar[int]
m1 = MyClass(id=1, data=5)
assert "status" not in inspect(m1).mapper.attrs
def test_i_have_plain_or_column_attrs_on_my_class_w_values(
self, decl_base
):
class MyClass(decl_base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
old_column: str = Column(String)
# we assume this is intentional
status: int = 5
# it's mapped too
assert "old_column" in inspect(MyClass).attrs
def test_i_have_plain_attrs_on_my_class_disallowed(self, decl_base):
with expect_annotation_syntax_error("MyClass.status"):
class MyClass(decl_base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
# we assume this is not intentional. because I made the
# same mistake myself :)
status: int
def test_i_have_plain_attrs_on_my_class_allowed(self, decl_base):
class MyClass(decl_base):
__tablename__ = "mytable"
__allow_unmapped__ = True
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
status: int
def test_allow_unmapped_on_mixin(self, decl_base):
class AllowsUnmapped:
__allow_unmapped__ = True
class MyClass(AllowsUnmapped, decl_base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
status: int
def test_allow_unmapped_on_base(self):
class Base(DeclarativeBase):
__allow_unmapped__ = True
class MyClass(Base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
status: int
@testing.variation("annotation", ["none", "any", "datatype"])
@testing.variation("explicit_name", [True, False])
@testing.variation("attribute", ["column", "deferred"])
def test_allow_unmapped_cols(self, annotation, explicit_name, attribute):
class Base(DeclarativeBase):
__allow_unmapped__ = True
if attribute.column:
if explicit_name:
attr = Column("data_one", Integer)
else:
attr = Column(Integer)
elif attribute.deferred:
if explicit_name:
attr = deferred(Column("data_one", Integer))
else:
attr = deferred(Column(Integer))
else:
attribute.fail()
class MyClass(Base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
if annotation.none:
data = attr
elif annotation.any:
data: Any = attr
elif annotation.datatype:
data: int = attr
else:
annotation.fail()
if explicit_name:
eq_(MyClass.__table__.c.keys(), ["id", "data_one"])
else:
eq_(MyClass.__table__.c.keys(), ["id", "data"])
def test_column_default(self, decl_base):
class MyClass(decl_base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(default="some default")
mc = MyClass()
assert "data" not in mc.__dict__
eq_(MyClass.__table__.c.data.default.arg, "some default")
def test_anno_w_fixed_table(self, decl_base):
users = Table(
"users",
decl_base.metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("data", String(50)),
Column("x", Integer),
Column("y", Integer),
Column("created_at", DateTime),
)
class User(decl_base):
__table__ = users
id: Mapped[int]
name: Mapped[str]
data: Mapped[Optional[str]]
x: Mapped[int]
y: Mapped[int]
created_at: Mapped[datetime.datetime]
self.assert_compile(
select(User),
"SELECT users.id, users.name, users.data, users.x, "
"users.y, users.created_at FROM users",
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
is_false(User.__table__.c.id.nullable)
is_false(User.__table__.c.name.nullable)
is_true(User.__table__.c.data.nullable)
assert isinstance(User.__table__.c.created_at.type, DateTime)
def test_construct_rhs_type_override_lhs(self, decl_base):
class Element(decl_base):
__tablename__ = "element"
id: Mapped[int] = mapped_column(BIGINT, primary_key=True)
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(BIGINT, primary_key=True)
other_id: Mapped[int] = mapped_column(ForeignKey("element.id"))
data: Mapped[int] = mapped_column()
# exact class test
is_(User.__table__.c.id.type.__class__, BIGINT)
is_(User.__table__.c.other_id.type.__class__, BIGINT)
is_(User.__table__.c.data.type.__class__, Integer)
@testing.combinations(True, False, argnames="include_rhs_type")
@testing.combinations(True, False, argnames="use_mixin")
def test_construct_nullability_overrides(
self, decl_base, include_rhs_type, use_mixin
):
if include_rhs_type:
args = (String,)
else:
args = ()
# anno only: global anno_str, anno_str_optional, anno_str_mc
# anno only: global anno_str_optional_mc, anno_str_mc_nullable
# anno only: global anno_str_optional_mc_notnull
# anno only: global newtype_str
anno_str = Annotated[str, 50]
anno_str_optional = Annotated[Optional[str], 30]
newtype_str = NewType("newtype_str", str)
anno_str_mc = Annotated[str, mapped_column()]
anno_str_optional_mc = Annotated[Optional[str], mapped_column()]
anno_str_mc_nullable = Annotated[str, mapped_column(nullable=True)]
anno_str_optional_mc_notnull = Annotated[
Optional[str], mapped_column(nullable=False)
]
decl_base.registry.update_type_annotation_map(
{
anno_str: String(50),
anno_str_optional: String(30),
newtype_str: String(40),
}
)
if TYPE_CHECKING:
class user_base:
pass
else:
if use_mixin:
user_base = object
else:
user_base = decl_base
class UserPossibleMixin(user_base):
if not use_mixin:
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True) # noqa: A001
lnnl_rndf: Mapped[str] = mapped_column(*args)
lnnl_rnnl: Mapped[str] = mapped_column(*args, nullable=False)
lnnl_rnl: Mapped[str] = mapped_column(*args, nullable=True)
lnl_rndf: Mapped[Optional[str]] = mapped_column(*args)
lnl_rnnl: Mapped[Optional[str]] = mapped_column(
*args, nullable=False
)
lnl_rnl: Mapped[Optional[str]] = mapped_column(
*args, nullable=True
)
# test #9177 cases
anno_1a: Mapped[anno_str] = mapped_column(*args)
anno_1b: Mapped[anno_str] = mapped_column(*args, nullable=True)
anno_1c: Mapped[anno_str] = mapped_column(*args, deferred=True)
anno_1d: Mapped[anno_str] = mapped_column(
*args, deferred=True, deferred_group="mygroup"
)
anno_2a: Mapped[anno_str_optional] = mapped_column(*args)
anno_2b: Mapped[anno_str_optional] = mapped_column(
*args, nullable=False
)
anno_3a: Mapped[anno_str_mc] = mapped_column(*args)
anno_3b: Mapped[anno_str_mc] = mapped_column(*args, nullable=True)
anno_3c: Mapped[Optional[anno_str_mc]] = mapped_column(*args)
anno_4a: Mapped[anno_str_optional_mc] = mapped_column(*args)
anno_4b: Mapped[anno_str_optional_mc] = mapped_column(
*args, nullable=False
)
anno_5a: Mapped[anno_str_mc_nullable] = mapped_column(*args)
anno_5b: Mapped[anno_str_mc_nullable] = mapped_column(
*args, nullable=False
)
anno_6a: Mapped[anno_str_optional_mc_notnull] = mapped_column(
*args
)
anno_6b: Mapped[anno_str_optional_mc_notnull] = mapped_column(
*args, nullable=True
)
newtype_1a: Mapped[newtype_str] = mapped_column(*args)
newtype_1b: Mapped[newtype_str] = mapped_column(
*args, nullable=True
)
if use_mixin:
class User(UserPossibleMixin, decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
else:
User = UserPossibleMixin
eq_(User.anno_1b.property.deferred, False)
eq_(User.anno_1c.property.deferred, True)
eq_(User.anno_1d.property.group, "mygroup")
is_false(User.__table__.c.lnnl_rndf.nullable)
is_false(User.__table__.c.lnnl_rnnl.nullable)
is_true(User.__table__.c.lnnl_rnl.nullable)
is_true(User.__table__.c.lnl_rndf.nullable)
is_false(User.__table__.c.lnl_rnnl.nullable)
is_true(User.__table__.c.lnl_rnl.nullable)
is_false(User.__table__.c.anno_1a.nullable)
is_true(User.__table__.c.anno_1b.nullable)
is_true(User.__table__.c.anno_2a.nullable)
is_false(User.__table__.c.anno_2b.nullable)
is_false(User.__table__.c.anno_3a.nullable)
is_true(User.__table__.c.anno_3b.nullable)
is_true(User.__table__.c.anno_3c.nullable)
is_true(User.__table__.c.anno_4a.nullable)
is_false(User.__table__.c.anno_4b.nullable)
is_true(User.__table__.c.anno_5a.nullable)
is_false(User.__table__.c.anno_5b.nullable)
is_false(User.__table__.c.anno_6a.nullable)
is_true(User.__table__.c.anno_6b.nullable)
# test #8410
is_false(User.__table__.c.lnnl_rndf._copy().nullable)
is_false(User.__table__.c.lnnl_rnnl._copy().nullable)
is_true(User.__table__.c.lnnl_rnl._copy().nullable)
is_true(User.__table__.c.lnl_rndf._copy().nullable)
is_false(User.__table__.c.lnl_rnnl._copy().nullable)
is_true(User.__table__.c.lnl_rnl._copy().nullable)
def test_fwd_refs(self, decl_base: Type[DeclarativeBase]):
# TODO: add an assertion?
class MyClass(decl_base):
__tablename__ = "my_table"
id: Mapped["int"] = mapped_column(primary_key=True)
data_one: Mapped["str"]
def test_typing_literal_identity(self, decl_base):
"""See issue #11820"""
# anno only: global _TypingLiteral, _TypingExtensionsLiteral
_TypingLiteral = typing.Literal["a", "b"]
_TypingExtensionsLiteral = typing_extensions.Literal["a", "b"]
class Foo(decl_base):
__tablename__ = "footable"
id: Mapped[int] = mapped_column(primary_key=True)
t: Mapped[_TypingLiteral]
te: Mapped[_TypingExtensionsLiteral]
for col in (Foo.__table__.c.t, Foo.__table__.c.te):
is_true(isinstance(col.type, Enum))
eq_(col.type.enums, ["a", "b"])
is_(col.type.native_enum, False)
@staticmethod
def annotated_name_test_cases():
return [
("sort_order", 100, lambda sort_order: sort_order == 100),
("nullable", False, lambda column: column.nullable is False),
(
"active_history",
True,
lambda column_property: column_property.active_history is True,
),
(
"deferred",
True,
lambda column_property: column_property.deferred is True,
),
(
"deferred",
_NoArg.NO_ARG,
lambda column_property: column_property is None,
),
(
"deferred_group",
"mygroup",
lambda column_property: column_property.deferred is True
and column_property.group == "mygroup",
),
(
"deferred_raiseload",
True,
lambda column_property: column_property.deferred is True
and column_property.raiseload is True,
),
(
"server_default",
"25",
lambda column: column.server_default.arg == "25",
),
(
"server_onupdate",
"25",
lambda column: column.server_onupdate.arg == "25",
),
(
"default",
25,
lambda column: column.default.arg == 25,
),
(
"insert_default",
25,
lambda column: column.default.arg == 25,
),
(
"onupdate",
25,
lambda column: column.onupdate.arg == 25,
),
("doc", "some doc", lambda column: column.doc == "some doc"),
(
"comment",
"some comment",
lambda column: column.comment == "some comment",
),
("index", True, lambda column: column.index is True),
("index", _NoArg.NO_ARG, lambda column: column.index is None),
("index", False, lambda column: column.index is False),
("unique", True, lambda column: column.unique is True),
("unique", False, lambda column: column.unique is False),
(
"autoincrement",
True,
lambda column: column.autoincrement is True,
),
("system", True, lambda column: column.system is True),
("primary_key", True, lambda column: column.primary_key is True),
("type_", BIGINT, lambda column: isinstance(column.type, BIGINT)),
(
"info",
{"foo": "bar"},
lambda column: column.info == {"foo": "bar"},
),
(
"use_existing_column",
True,
lambda mc: mc._use_existing_column is True,
),
(
"quote",
True,
exc.SADeprecationWarning(
"Can't use the 'key' or 'name' arguments in Annotated "
),
),
(
"key",
"mykey",
exc.SADeprecationWarning(
"Can't use the 'key' or 'name' arguments in Annotated "
),
),
(
"name",
"mykey",
exc.SADeprecationWarning(
"Can't use the 'key' or 'name' arguments in Annotated "
),
),
(
"kw_only",
True,
exc.SADeprecationWarning(
"Argument 'kw_only' is a dataclass argument "
),
),
(
"compare",
True,
exc.SADeprecationWarning(
"Argument 'compare' is a dataclass argument "
),
),
(
"default_factory",
lambda: 25,
exc.SADeprecationWarning(
"Argument 'default_factory' is a dataclass argument "
),
),
(
"repr",
True,
exc.SADeprecationWarning(
"Argument 'repr' is a dataclass argument "
),
),
(
"init",
True,
exc.SADeprecationWarning(
"Argument 'init' is a dataclass argument"
),
),
(
"hash",
True,
exc.SADeprecationWarning(
"Argument 'hash' is a dataclass argument"
),
),
(
"dataclass_metadata",
{},
exc.SADeprecationWarning(
"Argument 'dataclass_metadata' is a dataclass argument"
),
),
]
def test_we_got_all_attrs_test_annotated(self):
argnames = _py_inspect.getfullargspec(mapped_column)
_annotated_names_tested = {
case[0] for case in self.annotated_name_test_cases()
}
assert _annotated_names_tested.issuperset(argnames.kwonlyargs), (
f"annotated attributes were not tested: "
f"{set(argnames.kwonlyargs).difference(_annotated_names_tested)}"
)
@testing.combinations_list(
annotated_name_test_cases(),
argnames="argname, argument, assertion",
)
@testing.variation("use_annotated", [True, False, "control"])
def test_names_encountered_for_annotated(
self, argname, argument, assertion, use_annotated, decl_base
):
# anno only: global myint
if argument is not _NoArg.NO_ARG:
kw = {argname: argument}
if argname == "quote":
kw["name"] = "somename"
else:
kw = {}
is_warning = isinstance(assertion, exc.SADeprecationWarning)
is_dataclass = argname in (
"kw_only",
"init",
"repr",
"compare",
"default_factory",
"hash",
"dataclass_metadata",
)
if is_dataclass:
class Base(MappedAsDataclass, decl_base):
__abstract__ = True
else:
Base = decl_base
if use_annotated.control:
# test in reverse; that kw set on the main mapped_column() takes
# effect when the Annotated is there also and does not have the
# kw
amc = mapped_column()
myint = Annotated[int, amc]
mc = mapped_column(**kw)
class User(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
myname: Mapped[myint] = mc
elif use_annotated:
amc = mapped_column(**kw)
myint = Annotated[int, amc]
mc = mapped_column()
if is_warning:
with expect_deprecated(assertion.args[0]):
class User(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
myname: Mapped[myint] = mc
else:
class User(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
myname: Mapped[myint] = mc
else:
mc = cast(MappedColumn, mapped_column(**kw))
mapper_prop = mc.mapper_property_to_assign
column_to_assign, sort_order = mc.columns_to_assign[0]
if not is_warning:
assert_result = testing.resolve_lambda(
assertion,
sort_order=sort_order,
column_property=mapper_prop,
column=column_to_assign,
mc=mc,
)
assert assert_result
elif is_dataclass and (not use_annotated or use_annotated.control):
eq_(
getattr(mc._attribute_options, f"dataclasses_{argname}"),
argument,
)
@testing.combinations(("index",), ("unique",), argnames="paramname")
@testing.combinations((True,), (False,), (None,), argnames="orig")
@testing.combinations((True,), (False,), (None,), argnames="merging")
def test_index_unique_combinations(
self, paramname, orig, merging, decl_base
):
"""test #11091"""
# anno only: global myint
amc = mapped_column(**{paramname: merging})
myint = Annotated[int, amc]
mc = mapped_column(**{paramname: orig})
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
myname: Mapped[myint] = mc
result = getattr(User.__table__.c.myname, paramname)
if orig is None:
is_(result, merging)
else:
is_(result, orig)
def test_missing_mapped_lhs(self, decl_base):
with expect_annotation_syntax_error("User.name"):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: str = mapped_column() # type: ignore
def test_construct_lhs_separate_name(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
data: Mapped[Optional[str]] = mapped_column("the_data")
self.assert_compile(
select(User.data), "SELECT users.the_data FROM users"
)
is_true(User.__table__.c.the_data.nullable)
def test_construct_works_in_expr(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
class Address(decl_base):
__tablename__ = "addresses"
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int] = mapped_column(ForeignKey("users.id"))
user = relationship(User, primaryjoin=user_id == User.id)
self.assert_compile(
select(Address.user_id, User.id).join(Address.user),
"SELECT addresses.user_id, users.id FROM addresses "
"JOIN users ON addresses.user_id = users.id",
)
def test_construct_works_as_polymorphic_on(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
type: Mapped[str] = mapped_column()
__mapper_args__ = {"polymorphic_on": type}
decl_base.registry.configure()
is_(User.__table__.c.type, User.__mapper__.polymorphic_on)
def test_construct_works_as_version_id_col(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
version_id: Mapped[int] = mapped_column()
__mapper_args__ = {"version_id_col": version_id}
decl_base.registry.configure()
is_(User.__table__.c.version_id, User.__mapper__.version_id_col)
def test_construct_works_in_deferred(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = deferred(mapped_column())
self.assert_compile(select(User), "SELECT users.id FROM users")
self.assert_compile(
select(User).options(undefer(User.data)),
"SELECT users.id, users.data FROM users",
)
def test_deferred_kw(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(deferred=True)
self.assert_compile(select(User), "SELECT users.id FROM users")
self.assert_compile(
select(User).options(undefer(User.data)),
"SELECT users.id, users.data FROM users",
)
| MappedColumnTest |
python | conda__conda | conda/exceptions.py | {
"start": 32430,
"end": 32947
} | class ____(CondaError, ValueError):
def __init__(self, packages_with_cycles: Iterable[PackageRecord], **kwargs):
from .models.records import PackageRecord
packages_with_cycles = tuple(
PackageRecord.from_objects(p) for p in packages_with_cycles
)
message = f"Cyclic dependencies exist among these items: {dashlist(p.dist_str() for p in packages_with_cycles)}"
super().__init__(message, packages_with_cycles=packages_with_cycles, **kwargs)
| CyclicalDependencyError |
python | huggingface__transformers | tests/models/clvp/test_modeling_clvp.py | {
"start": 10428,
"end": 12218
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {}
def setUp(self):
self.model_tester = ClvpDecoderTester(self)
self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32)
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
cleanup(torch_device)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
if return_labels and model_class == ClvpForCausalLM:
inputs_dict["labels"] = torch.zeros(
[self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device
).long()
return inputs_dict
def test_training(self):
# we will only test the ClvpForCausalLM since it outputs loss
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
model = ClvpForCausalLM(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(reason="Clvp `prepare_inputs_for_generation` function doesn't have cache position.")
def test_generate_continue_from_inputs_embeds(self):
pass
| ClvpDecoderTest |
python | ray-project__ray | doc/source/ray-core/doc_code/direct_transport_gloo.py | {
"start": 1240,
"end": 1928
} | class ____:
@ray.method(tensor_transport="gloo")
def random_tensor(self):
return torch.randn(1000, 1000)
def sum(self, tensor: torch.Tensor):
return torch.sum(tensor)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
# The tensor will be stored by the `sender` actor instead of in Ray's object
# store.
tensor = sender.random_tensor.remote()
result = receiver.sum.remote(tensor)
print(ray.get(result))
# __gloo_full_example_end__
# __gloo_multiple_tensors_example_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
| MyActor |
python | conda__conda | conda/exceptions.py | {
"start": 42173,
"end": 42305
} | class ____(CondaError):
def __init__(self, msg: str, *args, **kwargs):
super().__init__(msg, *args, **kwargs)
| SpecNotFound |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/vultr.py | {
"start": 725,
"end": 1489
} | class ____(CloudEnvironment):
"""Updates integration test environment after delegation. Will setup the config file as parameter."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
VULTR_API_KEY=parser.get('default', 'key'),
)
display.sensitive.add(env_vars['VULTR_API_KEY'])
ansible_vars = dict(
vultr_resource_prefix=self.resource_prefix,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
| VultrCloudEnvironment |
python | huggingface__transformers | src/transformers/models/vit_mae/modeling_vit_mae.py | {
"start": 2353,
"end": 2873
} | class ____(ModelOutput):
r"""
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
Pixel reconstruction logits.
"""
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Class for ViTMAEForPreTraining's outputs, with potential hidden states and attentions.
"""
)
| ViTMAEDecoderOutput |
python | Netflix__metaflow | metaflow/user_configs/config_parameters.py | {
"start": 1283,
"end": 7728
} | class ____(collections.abc.Mapping, dict):
"""
ConfigValue is a thin wrapper around an arbitrarily nested dictionary-like
configuration object. It allows you to access elements of this nested structure
using either a "." notation or a [] notation. As an example, if your configuration
object is:
{"foo": {"bar": 42}}
you can access the value 42 using either config["foo"]["bar"] or config.foo.bar.
All "keys"" need to be valid Python identifiers
"""
# Thin wrapper to allow configuration values to be accessed using a "." notation
# as well as a [] notation.
# We inherit from dict to allow the isinstanceof check to work easily and also
# to provide a simple json dumps functionality.
def __init__(self, data: Union["ConfigValue", Dict[str, Any]]):
self._data = {k: self._construct(v) for k, v in data.items()}
# Enable json dumps
dict.__init__(self, self._data)
@classmethod
def fromkeys(cls, iterable: Iterable, value: Any = None) -> "ConfigValue":
"""
Creates a new ConfigValue object from the given iterable and value.
Parameters
----------
iterable : Iterable
Iterable to create the ConfigValue from.
value : Any, optional
Value to set for each key in the iterable.
Returns
-------
ConfigValue
A new ConfigValue object.
"""
return cls(dict.fromkeys(iterable, value))
def to_dict(self) -> Dict[Any, Any]:
"""
Returns a dictionary representation of this configuration object.
Returns
-------
Dict[Any, Any]
Dictionary equivalent of this configuration object.
"""
return self._to_dict(self._data)
def copy(self) -> "ConfigValue":
return self.__copy__()
def clear(self) -> None:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def update(self, *args, **kwargs) -> None:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def setdefault(self, key: Any, default: Any = None) -> Any:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def pop(self, key: Any, default: Any = None) -> Any:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def popitem(self) -> Tuple[Any, Any]:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def __getattr__(self, key: str) -> Any:
"""
Access an element of this configuration
Parameters
----------
key : str
Element to access
Returns
-------
Any
Element of the configuration
"""
if key == "_data":
# Called during unpickling. Special case to not run into infinite loop
# below.
raise AttributeError(key)
if key in self._data:
return self[key]
raise AttributeError(key)
def __setattr__(self, name: str, value: Any) -> None:
# Prevent configuration modification
if name == "_data":
return super().__setattr__(name, value)
raise TypeError("ConfigValue is immutable")
def __getitem__(self, key: Any) -> Any:
"""
Access an element of this configuration
Parameters
----------
key : Any
Element to access
Returns
-------
Any
Element of the configuration
"""
return self._data[key]
def __setitem__(self, key: Any, value: Any) -> None:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def __delattr__(self, key) -> None:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def __delitem__(self, key: Any) -> None:
# Prevent configuration modification
raise TypeError("ConfigValue is immutable")
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterator:
return iter(self._data)
def __eq__(self, other: Any) -> bool:
if isinstance(other, ConfigValue):
return self._data == other._data
if isinstance(other, dict):
return self._data == other
return False
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __copy__(self) -> "ConfigValue":
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items()})
return result
def __repr__(self) -> str:
return repr(self._data)
def __str__(self) -> str:
return str(self._data)
def __dir__(self) -> Iterable[str]:
return dir(type(self)) + [k for k in self._data.keys() if ID_PATTERN.match(k)]
def __contains__(self, key: Any) -> bool:
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Returns the keys of this configuration object.
Returns
-------
Any
Keys of this configuration object.
"""
return self._data.keys()
@classmethod
def _construct(cls, obj: Any) -> Any:
# Internal method to construct a ConfigValue so that all mappings internally
# are also converted to ConfigValue
if isinstance(obj, ConfigValue):
v = obj
elif isinstance(obj, collections.abc.Mapping):
v = ConfigValue({k: cls._construct(v) for k, v in obj.items()})
elif isinstance(obj, (list, tuple, set)):
v = type(obj)([cls._construct(x) for x in obj])
else:
v = obj
return v
@classmethod
def _to_dict(cls, obj: Any) -> Any:
# Internal method to convert all nested mappings to dicts
if isinstance(obj, collections.abc.Mapping):
v = {k: cls._to_dict(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple, set)):
v = type(obj)([cls._to_dict(x) for x in obj])
else:
v = obj
return v
def __reduce__(self):
return (self.__class__, (self.to_dict(),))
| ConfigValue |
python | openai__gym | gym/error.py | {
"start": 942,
"end": 1106
} | class ____(Error):
"""Raised when the user requests an env from the registry with an older version number than the latest env with the same name."""
| DeprecatedEnv |
python | PrefectHQ__prefect | src/integrations/prefect-azure/prefect_azure/workers/container_instance.py | {
"start": 14106,
"end": 18895
} | class ____(BaseVariables):
"""
Variables for an Azure Container Instance flow run.
"""
image: str = Field(
default_factory=get_prefect_image_name,
description=(
"The image to use for the Prefect container in the task. This value "
"defaults to a Prefect base image matching your local versions."
),
)
resource_group_name: str = Field(
default=...,
title="Azure Resource Group Name",
description=(
"The name of the Azure Resource Group in which to run Prefect ACI tasks."
),
)
subscription_id: SecretStr = Field(
default=...,
title="Azure Subscription ID",
description="The ID of the Azure subscription to create containers under.",
)
identities: Optional[List[str]] = Field(
title="Identities",
default=None,
description=(
"A list of user-assigned identities to associate with the container group. "
"The identities should be an ARM resource IDs in the form: "
"'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'." # noqa
),
)
entrypoint: Optional[str] = Field(
default=DEFAULT_CONTAINER_ENTRYPOINT,
description=(
"The entrypoint of the container you wish you run. This value "
"defaults to the entrypoint used by Prefect images and should only be "
"changed when using a custom image that is not based on an official "
"Prefect image. Any commands set on deployments will be passed "
"to the entrypoint as parameters."
),
)
image_registry: DockerRegistry = Field(
default=None,
title="Image Registry (Optional)",
description=(
"To use any private container registry with a username and password, "
"choose DockerRegistry. To use a private Azure Container Registry "
"with a managed identity, choose ACRManagedIdentity."
),
)
cpu: float = Field(
title="CPU",
default=ACI_DEFAULT_CPU,
description=(
"The number of virtual CPUs to assign to the task container. "
f"If not provided, a default value of {ACI_DEFAULT_CPU} will be used."
),
)
gpu_count: Optional[int] = Field(
title="GPU Count",
default=None,
description=(
"The number of GPUs to assign to the task container. "
"If not provided, no GPU will be used."
),
)
gpu_sku: Optional[str] = Field(
title="GPU SKU",
default=None,
description=(
"The Azure GPU SKU to use. See the ACI documentation for a list of "
"GPU SKUs available in each Azure region."
),
)
memory: float = Field(
default=ACI_DEFAULT_MEMORY,
description=(
"The amount of memory in gigabytes to provide to the ACI task. Valid "
"amounts are specified in the Azure documentation. If not provided, a "
f"default value of {ACI_DEFAULT_MEMORY} will be used unless present "
"on the task definition."
),
)
subnet_ids: Optional[List[str]] = Field(
title="Subnet IDs",
default=None,
description=("A list of subnet IDs to associate with the container group. "),
)
dns_servers: Optional[List[str]] = Field(
title="DNS Servers",
default=None,
description=("A list of DNS servers to associate with the container group."),
)
aci_credentials: AzureContainerInstanceCredentials = Field(
default_factory=AzureContainerInstanceCredentials,
description=("The credentials to use to authenticate with Azure."),
)
stream_output: bool = Field(
default=False,
description=(
"If `True`, logs will be streamed from the Prefect container to the local "
"console."
),
)
# Execution settings
task_start_timeout_seconds: int = Field(
default=240,
description=(
"The amount of time to watch for the start of the ACI container. "
"before marking it as failed."
),
)
task_watch_poll_interval: float = Field(
default=5.0,
description=(
"The number of seconds to wait between Azure API calls while monitoring "
"the state of an Azure Container Instances task."
),
)
keep_container_group: bool = Field(
default=False,
title="Keep Container Group After Completion",
description="Keep the completed container group on Azure.",
)
| AzureContainerVariables |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/attention.py | {
"start": 4023,
"end": 6984
} | class ____(torch.nn.Module):
"""
A module used to embed entities before passing them to a self-attention block.
Used in conjunction with ResidualSelfAttention to encode information about a self
and additional entities. Can also concatenate self to entities for ego-centric self-
attention. Inspired by architecture used in https://arxiv.org/pdf/1909.07528.pdf.
"""
def __init__(
self,
entity_size: int,
entity_num_max_elements: Optional[int],
embedding_size: int,
):
"""
Constructs an EntityEmbedding module.
:param x_self_size: Size of "self" entity.
:param entity_size: Size of other entities.
:param entity_num_max_elements: Maximum elements for a given entity, None for unrestricted.
Needs to be assigned in order for model to be exportable to ONNX and Sentis.
:param embedding_size: Embedding size for the entity encoder.
:param concat_self: Whether to concatenate x_self to entities. Set True for ego-centric
self-attention.
"""
super().__init__()
self.self_size: int = 0
self.entity_size: int = entity_size
self.entity_num_max_elements: int = -1
if entity_num_max_elements is not None:
self.entity_num_max_elements = entity_num_max_elements
self.embedding_size = embedding_size
# Initialization scheme from http://www.cs.toronto.edu/~mvolkovs/ICML2020_tfixup.pdf
self.self_ent_encoder = LinearEncoder(
self.entity_size,
1,
self.embedding_size,
kernel_init=Initialization.Normal,
kernel_gain=(0.125 / self.embedding_size) ** 0.5,
)
def add_self_embedding(self, size: int) -> None:
self.self_size = size
self.self_ent_encoder = LinearEncoder(
self.self_size + self.entity_size,
1,
self.embedding_size,
kernel_init=Initialization.Normal,
kernel_gain=(0.125 / self.embedding_size) ** 0.5,
)
def forward(self, x_self: torch.Tensor, entities: torch.Tensor) -> torch.Tensor:
num_entities = self.entity_num_max_elements
if num_entities < 0:
if exporting_to_onnx.is_exporting():
raise UnityTrainerException(
"Trying to export an attention mechanism that doesn't have a set max \
number of elements."
)
num_entities = entities.shape[1]
if self.self_size > 0:
expanded_self = x_self.reshape(-1, 1, self.self_size)
expanded_self = torch.cat([expanded_self] * num_entities, dim=1)
# Concatenate all observations with self
entities = torch.cat([expanded_self, entities], dim=2)
# Encode entities
encoded_entities = self.self_ent_encoder(entities)
return encoded_entities
| EntityEmbedding |
python | getsentry__sentry | src/sentry/codecov/enums.py | {
"start": 464,
"end": 615
} | class ____(Enum):
INTERVAL_30_DAY = "INTERVAL_30_DAY"
INTERVAL_7_DAY = "INTERVAL_7_DAY"
INTERVAL_1_DAY = "INTERVAL_1_DAY"
| MeasurementInterval |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1250866,
"end": 1251666
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.enable_saml event."""
__schema__ = github_schema
__field_names__ = ("digest_method_url", "issuer_url", "signature_method_url", "single_sign_on_url")
digest_method_url = sgqlc.types.Field(URI, graphql_name="digestMethodUrl")
"""The SAML provider's digest algorithm URL."""
issuer_url = sgqlc.types.Field(URI, graphql_name="issuerUrl")
"""The SAML provider's issuer URL."""
signature_method_url = sgqlc.types.Field(URI, graphql_name="signatureMethodUrl")
"""The SAML provider's signature algorithm URL."""
single_sign_on_url = sgqlc.types.Field(URI, graphql_name="singleSignOnUrl")
"""The SAML provider's single sign-on URL."""
| OrgEnableSamlAuditEntry |
python | prabhupant__python-ds | data_structures/bst/dfs_recursion.py | {
"start": 0,
"end": 615
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
# Depth First Search
def inorder(root):
if root:
inorder(root.left)
print(root.val)
inorder(root.right)
def postorder(root):
if root:
postorder(root.left)
postorder(root.right)
print(root.val)
def preorder(root):
if root:
print(root.val)
postorder(root.left)
postorder(root.right)
root = Node(3)
root.left = Node(2)
root.right = Node(4)
root.left.left = Node(1)
root.right.right = Node(5)
inorder(root)
| Node |
python | doocs__leetcode | solution/2200-2299/2288.Apply Discount to Prices/Solution.py | {
"start": 0,
"end": 302
} | class ____:
def discountPrices(self, sentence: str, discount: int) -> str:
ans = []
for w in sentence.split():
if w[0] == '$' and w[1:].isdigit():
w = f'${int(w[1:]) * (1 - discount / 100):.2f}'
ans.append(w)
return ' '.join(ans)
| Solution |
python | google__python-fire | fire/decorators_test.py | {
"start": 2045,
"end": 5582
} | class ____(testutils.BaseTestCase):
def testSetParseFnsNamedArgs(self):
self.assertEqual(core.Fire(NoDefaults, command=['double', '2']), 4)
self.assertEqual(core.Fire(NoDefaults, command=['triple', '4']), 12.0)
def testSetParseFnsPositionalArgs(self):
self.assertEqual(core.Fire(NoDefaults, command=['quadruple', '5']), 20)
def testSetParseFnsFnWithPositionalArgs(self):
self.assertEqual(core.Fire(double, command=['5']), 10)
def testSetParseFnsDefaultsFromPython(self):
# When called from Python, function should behave normally.
self.assertTupleEqual(WithDefaults().example1(), (10, int))
self.assertEqual(WithDefaults().example1(5), (5, int))
self.assertEqual(WithDefaults().example1(12.0), (12, float))
def testSetParseFnsDefaultsFromFire(self):
# Fire should use the decorator to know how to parse string arguments.
self.assertEqual(core.Fire(WithDefaults, command=['example1']), (10, int))
self.assertEqual(core.Fire(WithDefaults, command=['example1', '10']),
(10, float))
self.assertEqual(core.Fire(WithDefaults, command=['example1', '13']),
(13, float))
self.assertEqual(core.Fire(WithDefaults, command=['example1', '14.0']),
(14, float))
def testSetParseFnsNamedDefaultsFromPython(self):
# When called from Python, function should behave normally.
self.assertTupleEqual(WithDefaults().example2(), (10, int))
self.assertEqual(WithDefaults().example2(5), (5, int))
self.assertEqual(WithDefaults().example2(12.0), (12, float))
def testSetParseFnsNamedDefaultsFromFire(self):
# Fire should use the decorator to know how to parse string arguments.
self.assertEqual(core.Fire(WithDefaults, command=['example2']), (10, int))
self.assertEqual(core.Fire(WithDefaults, command=['example2', '10']),
(10, float))
self.assertEqual(core.Fire(WithDefaults, command=['example2', '13']),
(13, float))
self.assertEqual(core.Fire(WithDefaults, command=['example2', '14.0']),
(14, float))
def testSetParseFnsPositionalAndNamed(self):
self.assertEqual(core.Fire(MixedArguments, ['example3', '10', '10']),
(10, '10'))
def testSetParseFnsOnlySomeTypes(self):
self.assertEqual(
core.Fire(PartialParseFn, command=['example4', '10', '10']), ('10', 10))
self.assertEqual(
core.Fire(PartialParseFn, command=['example5', '10', '10']), (10, '10'))
def testSetParseFnsForKeywordArgs(self):
self.assertEqual(
core.Fire(WithKwargs, command=['example6']), ('default', 0))
self.assertEqual(
core.Fire(WithKwargs, command=['example6', '--herring', '"red"']),
('default', 0))
self.assertEqual(
core.Fire(WithKwargs, command=['example6', '--mode', 'train']),
('train', 0))
self.assertEqual(core.Fire(WithKwargs, command=['example6', '--mode', '3']),
('3', 0))
self.assertEqual(
core.Fire(WithKwargs,
command=['example6', '--mode', '-1', '--count', '10']),
('-1', 10))
self.assertEqual(
core.Fire(WithKwargs, command=['example6', '--count', '-2']),
('default', -2))
def testSetParseFn(self):
self.assertEqual(
core.Fire(WithVarArgs,
command=['example7', '1', '--arg2=2', '3', '4', '--kwarg=5']),
('1', '2', ('3', '4'), {'kwarg': '5'}))
if __name__ == '__main__':
testutils.main()
| FireDecoratorsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/collections.py | {
"start": 14268,
"end": 47821
} | class ____:
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"invalidated",
"empty",
)
attr: _CollectionAttributeImpl
_key: str
# this is actually a weakref; see note in constructor
_data: Callable[..., _AdaptedCollectionProtocol]
owner_state: InstanceState[Any]
invalidated: bool
empty: bool
def __init__(
self,
attr: _CollectionAttributeImpl,
owner_state: InstanceState[Any],
data: _AdaptedCollectionProtocol,
):
self.attr = attr
self._key = attr.key
# this weakref stays referenced throughout the lifespan of
# CollectionAdapter. so while the weakref can return None, this
# is realistically only during garbage collection of this object, so
# we type this as a callable that returns _AdaptedCollectionProtocol
# in all cases.
self._data = weakref.ref(data) # type: ignore
self.owner_state = owner_state
data._sa_adapter = self
self.invalidated = False
self.empty = False
def _warn_invalidated(self) -> None:
util.warn("This collection has been invalidated.")
@property
def data(self) -> _AdaptedCollectionProtocol:
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self) -> bool:
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(
self, item: Any, initiator: Optional[AttributeEventToken] = None
) -> None:
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def _set_empty(self, user_data):
assert (
not self.empty
), "This collection adapter is already in the 'empty' state"
self.empty = True
self.owner_state._empty_collections[self._key] = user_data
def _reset_empty(self) -> None:
assert (
self.empty
), "This collection adapter is not in the 'empty' state"
self.empty = False
self.owner_state.dict[self._key] = (
self.owner_state._empty_collections.pop(self._key)
)
def _refuse_empty(self) -> NoReturn:
raise sa_exc.InvalidRequestError(
"This is a special 'empty' collection which cannot accommodate "
"internal mutation operations"
)
def append_without_event(self, item: Any) -> None:
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items: Iterable[Any]) -> None:
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(
self, item: Any, initiator: Optional[AttributeEventToken] = None
) -> None:
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item: Any) -> None:
"""Remove an entity from the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(
self, initiator: Optional[AttributeEventToken] = None
) -> None:
"""Empty the collection, firing a mutation event for each entity."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self) -> None:
"""Empty the collection, firing no events."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
def _fire_append_wo_mutation_event_bulk(
self, items, initiator=None, key=NO_KEY
):
if not items:
return
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
for item in items:
self.attr.fire_append_wo_mutation_event(
self.owner_state,
self.owner_state.dict,
item,
initiator,
key,
)
def fire_append_wo_mutation_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity is entering the collection but is already
present.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
.. versionadded:: 1.4.15
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_wo_mutation_event(
self.owner_state, self.owner_state.dict, item, initiator, key
)
else:
return item
def fire_append_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator, key
)
else:
return item
def _fire_remove_event_bulk(self, items, initiator=None, key=NO_KEY):
if not items:
return
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
for item in items:
self.attr.fire_remove_event(
self.owner_state,
self.owner_state.dict,
item,
initiator,
key,
)
def fire_remove_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator, key
)
def fire_pre_remove_event(self, initiator=None, key=NO_KEY):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state,
self.owner_state.dict,
initiator=initiator,
key=key,
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
"empty": self.empty,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
# see note in constructor regarding this type: ignore
self._data = weakref.ref(d["data"]) # type: ignore
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
self.empty = d.get("empty", False)
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
existing_adapter._fire_append_wo_mutation_event_bulk(
constants, initiator=initiator
)
existing_adapter._fire_remove_event_bulk(removals, initiator=initiator)
def _prepare_instrumentation(
factory: Union[Type[Collection[Any]], _CollectionFactoryType],
) -> _CollectionFactoryType:
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
"""
impl_factory: _CollectionFactoryType
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
impl_factory = __canned_instrumentation[factory]
else:
impl_factory = cast(_CollectionFactoryType, factory)
cls: Union[_CollectionFactoryType, Type[Collection[Any]]]
# Create a specimen
cls = type(impl_factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# if so, just convert.
# in previous major releases, this codepath wasn't working and was
# not covered by tests. prior to that it supplied a "wrapper"
# function that would return the class, though the rationale for this
# case is not known
impl_factory = __canned_instrumentation[cls]
cls = type(impl_factory())
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None) != id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return impl_factory
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles: Dict[str, str] = {}
methods: Dict[str, Tuple[Optional[str], Optional[int], Optional[str]]] = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in ("appender", "remover", "iterator")
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before: Optional[Tuple[str, int]] = None
after: Optional[str] = None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
assert collection_type is not None
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
    """Validate that the appender, remover and iterator roles are all
    satisfied on ``cls``.

    Raises ``sa_exc.ArgumentError`` if any role is missing.  For the
    appender and remover, also queues implicit event instrumentation on
    the first positional argument when the method is not otherwise
    instrumented.
    """
    if "appender" not in roles or not hasattr(cls, roles["appender"]):
        raise sa_exc.ArgumentError(
            "Type %s must elect an appender method to be "
            "a collection class" % cls.__name__
        )
    appender_name = roles["appender"]
    if appender_name not in methods and not hasattr(
        getattr(cls, appender_name), "_sa_instrumented"
    ):
        # no explicit @collection.adds decoration: fire the append
        # event implicitly against argument 1
        methods[appender_name] = ("fire_append_event", 1, None)

    if "remover" not in roles or not hasattr(cls, roles["remover"]):
        raise sa_exc.ArgumentError(
            "Type %s must elect a remover method to be "
            "a collection class" % cls.__name__
        )
    remover_name = roles["remover"]
    if remover_name not in methods and not hasattr(
        getattr(cls, remover_name), "_sa_instrumented"
    ):
        methods[remover_name] = ("fire_remove_event", 1, None)

    if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
        raise sa_exc.ArgumentError(
            "Type %s must elect an iterator method to be "
            "a collection class" % cls.__name__
        )
def _set_collection_attributes(cls, roles, methods):
    """Apply the queued instrumentation to ``cls``.

    Wraps each method listed in ``methods`` with its event-firing
    mutator, publishes each role as a ``_sa_<role>`` alias on the
    class, and stamps the class as instrumented.
    """
    # wrap mutator methods so they fire append/remove events
    for method_name, triple in methods.items():
        before, argument, after = triple
        wrapped = _instrument_membership_mutator(
            getattr(cls, method_name), before, argument, after
        )
        setattr(cls, method_name, wrapped)

    # intern the role map as _sa_appender / _sa_remover / _sa_iterator
    for role, method_name in roles.items():
        setattr(cls, "_sa_%s" % role, getattr(cls, method_name))

    cls._sa_adapter = None
    # the id() of the class marks this exact class object as done
    cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
    """Route method args and/or return value through the collection
    adapter.

    :param method: the original mutator method being wrapped.
    :param before: adapter event to fire before calling ``method``
      ("fire_append_event" or "fire_remove_event"), or None.
    :param argument: positional index (int) or parameter name (str) of
      the argument carrying the collection member; only consulted when
      ``before`` is set.
    :param after: adapter event to fire against ``method``'s non-None
      return value, or None.
    """
    # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
    if before:
        fn_args = list(
            util.flatten_iterator(inspect_getfullargspec(method)[0])
        )
        if isinstance(argument, int):
            # positional reference; also resolve the parameter name so
            # keyword-style invocations can be routed as well
            pos_arg = argument
            named_arg = len(fn_args) > argument and fn_args[argument] or None
        else:
            # named reference; resolve a positional index if the name
            # appears in the signature, else keyword-only lookup
            if argument in fn_args:
                pos_arg = fn_args.index(argument)
            else:
                pos_arg = None
            named_arg = argument
        del fn_args

    def wrapper(*args, **kw):
        if before:
            # locate the collection member among the call arguments,
            # preferring positional, falling back to keyword
            if pos_arg is None:
                if named_arg not in kw:
                    raise sa_exc.ArgumentError(
                        "Missing argument %s" % argument
                    )
                value = kw[named_arg]
            else:
                if len(args) > pos_arg:
                    value = args[pos_arg]
                elif named_arg in kw:
                    value = kw[named_arg]
                else:
                    raise sa_exc.ArgumentError(
                        "Missing argument %s" % argument
                    )

        initiator = kw.pop("_sa_initiator", None)
        if initiator is False:
            # _sa_initiator=False explicitly suppresses event firing
            executor = None
        else:
            executor = args[0]._sa_adapter

        if before and executor:
            getattr(executor, before)(value, initiator)

        if not after or not executor:
            return method(*args, **kw)
        else:
            res = method(*args, **kw)
            if res is not None:
                # fire the "after" event against the returned member
                getattr(executor, after)(res, initiator)
            return res

    wrapper._sa_instrumented = True  # type: ignore[attr-defined]
    if hasattr(method, "_sa_instrument_role"):
        wrapper._sa_instrument_role = method._sa_instrument_role  # type: ignore[attr-defined] # noqa: E501
    wrapper.__name__ = method.__name__
    wrapper.__doc__ = method.__doc__
    return wrapper
def __set_wo_mutation(collection, item, _sa_initiator=None):
    """Fire an append-without-mutation event for ``item``.

    Used when an operation touches a member that is already present;
    the collection itself is not mutated.
    """
    if _sa_initiator is False:
        # event firing explicitly suppressed by the caller
        return
    executor = collection._sa_adapter
    if executor:
        executor.fire_append_wo_mutation_event(item, _sa_initiator, key=None)
def __set(collection, item, _sa_initiator, key):
    """Fire an append event for ``item`` and return the (possibly
    replaced) item to actually store.

    Always fires before the collection is mutated.
    """
    if _sa_initiator is False:
        return item
    executor = collection._sa_adapter
    if executor:
        item = executor.fire_append_event(item, _sa_initiator, key=key)
    return item
def __del(collection, item, _sa_initiator, key):
    """Fire a remove event for ``item``.

    Fires before the collection is actually mutated, *except* for pop
    operations, where it fires afterwards (pop operations invoke the
    __before_pop hook beforehand instead).
    """
    if _sa_initiator is False:
        return
    executor = collection._sa_adapter
    if executor:
        executor.fire_remove_event(item, _sa_initiator, key=key)
def __before_pop(collection, _sa_initiator=None):
    """Fire the pre-remove event that precedes a pop() operation."""
    adapter = collection._sa_adapter
    if adapter:
        adapter.fire_pre_remove_event(_sa_initiator)
def _list_decorators() -> Dict[str, Callable[[_FN], _FN]]:
    """Tailored instrumentation wrappers for any list-like class.

    Returns a mapping of list method names to decorators; each
    decorator wraps the corresponding mutator so membership changes
    pass through the ``__set`` / ``__del`` event hooks before (or, for
    pop, after) the underlying list operation runs.
    """

    def _tidy(fn):
        # mark the wrapper as instrumented and give it the docstring
        # of the stdlib list method of the same name
        fn._sa_instrumented = True
        fn.__doc__ = getattr(list, fn.__name__).__doc__

    def append(fn):
        def append(self, item, _sa_initiator=None):
            # the append event may replace the item to be stored
            item = __set(self, item, _sa_initiator, NO_KEY)
            fn(self, item)

        _tidy(append)
        return append

    def remove(fn):
        def remove(self, value, _sa_initiator=None):
            # remove event fires before the mutation
            __del(self, value, _sa_initiator, NO_KEY)
            # testlib.pragma exempt:__eq__
            fn(self, value)

        _tidy(remove)
        return remove

    def insert(fn):
        def insert(self, index, value):
            # the insert position doubles as the event "key"
            value = __set(self, value, None, index)
            fn(self, index, value)

        _tidy(insert)
        return insert

    def __setitem__(fn):
        def __setitem__(self, index, value):
            if not isinstance(index, slice):
                existing = self[index]
                # NOTE: overwriting a None member does not fire a
                # remove event
                if existing is not None:
                    __del(self, existing, None, index)
                value = __set(self, value, None, index)
                fn(self, index, value)
            else:
                # slice assignment requires __delitem__, insert, __len__
                # normalize the slice bounds first
                step = index.step or 1
                start = index.start or 0
                if start < 0:
                    start += len(self)
                if index.stop is not None:
                    stop = index.stop
                else:
                    stop = len(self)
                if stop < 0:
                    stop += len(self)

                if step == 1:
                    # contiguous slice: self-assignment is a no-op;
                    # otherwise delete the old members in place, then
                    # insert the new ones one at a time so each passes
                    # through the instrumented insert()
                    if value is self:
                        return

                    for i in range(start, stop, step):
                        if len(self) > start:
                            del self[start]

                    for i, item in enumerate(value):
                        self.insert(i + start, item)
                else:
                    # extended slice: sizes must match, mirroring the
                    # built-in list error message
                    rng = list(range(start, stop, step))
                    if len(value) != len(rng):
                        raise ValueError(
                            "attempt to assign sequence of size %s to "
                            "extended slice of size %s"
                            % (len(value), len(rng))
                        )
                    for i, item in zip(rng, value):
                        self.__setitem__(i, item)

        _tidy(__setitem__)
        return __setitem__

    def __delitem__(fn):
        def __delitem__(self, index):
            if not isinstance(index, slice):
                item = self[index]
                __del(self, item, None, index)
                fn(self, index)
            else:
                # slice deletion requires __getslice__ and a slice-groking
                # __getitem__ for stepped deletion
                # note: not breaking this into atomic dels
                for item in self[index]:
                    __del(self, item, None, index)
                fn(self, index)

        _tidy(__delitem__)
        return __delitem__

    def extend(fn):
        def extend(self, iterable):
            # materialize the iterable up front, then route every
            # element through the instrumented append()
            for value in list(iterable):
                self.append(value)

        _tidy(extend)
        return extend

    def __iadd__(fn):
        def __iadd__(self, iterable):
            # list.__iadd__ takes any iterable and seems to let TypeError
            # raise as-is instead of returning NotImplemented
            for value in list(iterable):
                self.append(value)
            return self

        _tidy(__iadd__)
        return __iadd__

    def pop(fn):
        def pop(self, index=-1):
            # pop fires its remove event *after* the fact; the
            # pre-remove hook runs first
            __before_pop(self)
            item = fn(self, index)
            __del(self, item, None, index)
            return item

        _tidy(pop)
        return pop

    def clear(fn):
        def clear(self, index=-1):
            # fire a remove event per member before the wholesale
            # clear; `index` is only used as the event key
            for item in self:
                __del(self, item, None, index)
            fn(self)

        _tidy(clear)
        return clear

    # __imul__ : not wrapping this. all members of the collection are already
    # present, so no need to fire appends... wrapping it with an explicit
    # decorator is still possible, so events on *= can be had if they're
    # desired. hard to imagine a use case for __imul__, though.

    # harvest every decorator defined above by its local name;
    # _tidy is internal-only and excluded
    l = locals().copy()
    l.pop("_tidy")
    return l
def _dict_decorators() -> Dict[str, Callable[[_FN], _FN]]:
    """Tailored instrumentation wrappers for any dict-like mapping class.

    Returns a mapping of dict method names to decorators; each wraps
    the corresponding mutator so that value additions and removals are
    routed through the ``__set`` / ``__del`` event hooks.
    """

    def _tidy(fn):
        # mark the wrapper as instrumented and give it the docstring
        # of the stdlib dict method of the same name
        fn._sa_instrumented = True
        fn.__doc__ = getattr(dict, fn.__name__).__doc__

    def __setitem__(fn):
        def __setitem__(self, key, value, _sa_initiator=None):
            # replacing an existing value fires remove-then-append
            if key in self:
                __del(self, self[key], _sa_initiator, key)
            value = __set(self, value, _sa_initiator, key)
            fn(self, key, value)

        _tidy(__setitem__)
        return __setitem__

    def __delitem__(fn):
        def __delitem__(self, key, _sa_initiator=None):
            if key in self:
                __del(self, self[key], _sa_initiator, key)
            fn(self, key)

        _tidy(__delitem__)
        return __delitem__

    def clear(fn):
        def clear(self):
            # fire a remove event per value before the wholesale clear
            for key in self:
                __del(self, self[key], None, key)
            fn(self)

        _tidy(clear)
        return clear

    def pop(fn):
        def pop(self, key, default=NO_ARG):
            __before_pop(self)
            # remember membership now; the remove event fires after
            # the underlying pop, and only if the key was present
            # (i.e. not when a default is returned)
            _to_del = key in self
            if default is NO_ARG:
                item = fn(self, key)
            else:
                item = fn(self, key, default)
            if _to_del:
                __del(self, item, None, key)
            return item

        _tidy(pop)
        return pop

    def popitem(fn):
        def popitem(self):
            __before_pop(self)
            item = fn(self)
            # NOTE(review): the event key here is the literal 1, not
            # item[0] — appears deliberate, but worth confirming
            __del(self, item[1], None, 1)
            return item

        _tidy(popitem)
        return popitem

    def setdefault(fn):
        def setdefault(self, key, default=None):
            if key not in self:
                # delegates to the instrumented __setitem__, which
                # fires the append event
                self.__setitem__(key, default)
                return default
            else:
                value = self.__getitem__(key)
                if value is default:
                    # present and identical: append-without-mutation
                    __set_wo_mutation(self, value, None)
                return value

        _tidy(setdefault)
        return setdefault

    def update(fn):
        def update(self, __other=NO_ARG, **kw):
            if __other is not NO_ARG:
                if hasattr(__other, "keys"):
                    # mapping form; assign only when the value object
                    # actually differs (identity comparison), else fire
                    # the no-mutation event
                    for key in list(__other):
                        if key not in self or self[key] is not __other[key]:
                            self[key] = __other[key]
                        else:
                            __set_wo_mutation(self, __other[key], None)
                else:
                    # iterable-of-pairs form
                    for key, value in __other:
                        if key not in self or self[key] is not value:
                            self[key] = value
                        else:
                            __set_wo_mutation(self, value, None)
            for key in kw:
                if key not in self or self[key] is not kw[key]:
                    self[key] = kw[key]
                else:
                    __set_wo_mutation(self, kw[key], None)

        _tidy(update)
        return update

    # harvest every decorator defined above by its local name;
    # _tidy is internal-only and excluded
    l = locals().copy()
    l.pop("_tidy")
    return l
_set_binop_bases = (set, frozenset)


def _set_binops_check_strict(self: Any, obj: Any) -> bool:
    """Permit only ``set``, ``frozenset`` and instances of
    ``self.__class__`` as the other operand of a binary set op."""
    allowed = _set_binop_bases + (self.__class__,)
    return isinstance(obj, allowed)
def _set_decorators() -> Dict[str, Callable[[_FN], _FN]]:
    """Tailored instrumentation wrappers for any set-like class.

    Returns a mapping of set method names to decorators; each wraps
    the corresponding mutator so membership changes are routed through
    the ``__set`` / ``__del`` event hooks.  In-place binary operators
    (``|=``, ``-=``, ``&=``, ``^=``) first vet the operand via
    ``_set_binops_check_strict``.
    """

    def _tidy(fn):
        # mark the wrapper as instrumented and give it the docstring
        # of the stdlib set method of the same name
        fn._sa_instrumented = True
        fn.__doc__ = getattr(set, fn.__name__).__doc__

    def add(fn):
        def add(self, value, _sa_initiator=None):
            if value not in self:
                # the append event may replace the value to be stored
                value = __set(self, value, _sa_initiator, NO_KEY)
            else:
                # already present: fire append-without-mutation
                __set_wo_mutation(self, value, _sa_initiator)
            # testlib.pragma exempt:__hash__
            fn(self, value)

        _tidy(add)
        return add

    def discard(fn):
        def discard(self, value, _sa_initiator=None):
            # testlib.pragma exempt:__hash__
            if value in self:
                __del(self, value, _sa_initiator, NO_KEY)
                # testlib.pragma exempt:__hash__
            fn(self, value)

        _tidy(discard)
        return discard

    def remove(fn):
        def remove(self, value, _sa_initiator=None):
            # testlib.pragma exempt:__hash__
            if value in self:
                __del(self, value, _sa_initiator, NO_KEY)
            # testlib.pragma exempt:__hash__
            fn(self, value)

        _tidy(remove)
        return remove

    def pop(fn):
        def pop(self):
            __before_pop(self)
            item = fn(self)
            # for set in particular, we have no way to access the item
            # that will be popped before pop is called.
            __del(self, item, None, NO_KEY)
            return item

        _tidy(pop)
        return pop

    def clear(fn):
        def clear(self):
            # the wrapped built-in is never invoked; members are
            # removed one at a time through the instrumented remove(),
            # iterating over a copy since the set shrinks as we go
            for item in list(self):
                self.remove(item)

        _tidy(clear)
        return clear

    def update(fn):
        def update(self, value):
            # route each element through the instrumented add()
            for item in value:
                self.add(item)

        _tidy(update)
        return update

    def __ior__(fn):
        def __ior__(self, value):
            if not _set_binops_check_strict(self, value):
                return NotImplemented
            for item in value:
                self.add(item)
            return self

        _tidy(__ior__)
        return __ior__

    def difference_update(fn):
        def difference_update(self, value):
            # route each element through the instrumented discard()
            for item in value:
                self.discard(item)

        _tidy(difference_update)
        return difference_update

    def __isub__(fn):
        def __isub__(self, value):
            if not _set_binops_check_strict(self, value):
                return NotImplemented
            for item in value:
                self.discard(item)
            return self

        _tidy(__isub__)
        return __isub__

    def intersection_update(fn):
        def intersection_update(self, other):
            # compute the delta first, then apply removals and
            # additions through the instrumented methods
            want, have = self.intersection(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)

        _tidy(intersection_update)
        return intersection_update

    def __iand__(fn):
        def __iand__(self, other):
            if not _set_binops_check_strict(self, other):
                return NotImplemented
            want, have = self.intersection(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
            return self

        _tidy(__iand__)
        return __iand__

    def symmetric_difference_update(fn):
        def symmetric_difference_update(self, other):
            # same delta-then-apply strategy as intersection_update
            want, have = self.symmetric_difference(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)

        _tidy(symmetric_difference_update)
        return symmetric_difference_update

    def __ixor__(fn):
        def __ixor__(self, other):
            if not _set_binops_check_strict(self, other):
                return NotImplemented
            want, have = self.symmetric_difference(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
            return self

        _tidy(__ixor__)
        return __ixor__

    # harvest every decorator defined above by its local name;
    # _tidy is internal-only and excluded
    l = locals().copy()
    l.pop("_tidy")
    return l
| CollectionAdapter |
python | huggingface__transformers | src/transformers/models/florence2/modular_florence2.py | {
"start": 44262,
"end": 44318
} | class ____(BeitDropPath):
pass
| Florence2VisionDropPath |
python | doocs__leetcode | solution/1400-1499/1413.Minimum Value to Get Positive Step by Step Sum/Solution.py | {
"start": 0,
"end": 192
} | class ____:
def minStartValue(self, nums: List[int]) -> int:
s, t = 0, inf
for num in nums:
s += num
t = min(t, s)
return max(1, 1 - t)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 60233,
"end": 61064
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"path",
"location",
"annotation_level",
"message",
"title",
"raw_details",
)
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
location = sgqlc.types.Field(
sgqlc.types.non_null("CheckAnnotationRange"), graphql_name="location"
)
annotation_level = sgqlc.types.Field(
sgqlc.types.non_null(CheckAnnotationLevel), graphql_name="annotationLevel"
)
message = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="message")
title = sgqlc.types.Field(String, graphql_name="title")
raw_details = sgqlc.types.Field(String, graphql_name="rawDetails")
| CheckAnnotationData |
python | numba__numba | numba/cuda/tests/cudapy/test_array_args.py | {
"start": 193,
"end": 4991
} | class ____(CUDATestCase):
def test_array_ary(self):
@cuda.jit('double(double[:],int64)', device=True, inline=True)
def device_function(a, c):
return a[c]
@cuda.jit('void(double[:],double[:])')
def kernel(x, y):
i = cuda.grid(1)
y[i] = device_function(x, i)
x = np.arange(10, dtype=np.double)
y = np.zeros_like(x)
kernel[10, 1](x, y)
self.assertTrue(np.all(x == y))
def test_unituple(self):
@cuda.jit
def f(r, x):
r[0] = x[0]
r[1] = x[1]
r[2] = x[2]
x = (1, 2, 3)
r = np.zeros(len(x), dtype=np.int64)
f[1, 1](r, x)
for i in range(len(x)):
self.assertEqual(r[i], x[i])
def test_tuple(self):
@cuda.jit
def f(r1, r2, x):
r1[0] = x[0]
r1[1] = x[1]
r1[2] = x[2]
r2[0] = x[3]
r2[1] = x[4]
r2[2] = x[5]
x = (1, 2, 3, 4.5, 5.5, 6.5)
r1 = np.zeros(len(x) // 2, dtype=np.int64)
r2 = np.zeros(len(x) // 2, dtype=np.float64)
f[1, 1](r1, r2, x)
for i in range(len(r1)):
self.assertEqual(r1[i], x[i])
for i in range(len(r2)):
self.assertEqual(r2[i], x[i + len(r1)])
def test_namedunituple(self):
@cuda.jit
def f(r, x):
r[0] = x.x
r[1] = x.y
Point = namedtuple('Point', ('x', 'y'))
x = Point(1, 2)
r = np.zeros(len(x), dtype=np.int64)
f[1, 1](r, x)
self.assertEqual(r[0], x.x)
self.assertEqual(r[1], x.y)
def test_namedtuple(self):
@cuda.jit
def f(r1, r2, x):
r1[0] = x.x
r1[1] = x.y
r2[0] = x.r
Point = namedtuple('Point', ('x', 'y', 'r'))
x = Point(1, 2, 2.236)
r1 = np.zeros(2, dtype=np.int64)
r2 = np.zeros(1, dtype=np.float64)
f[1, 1](r1, r2, x)
self.assertEqual(r1[0], x.x)
self.assertEqual(r1[1], x.y)
self.assertEqual(r2[0], x.r)
def test_empty_tuple(self):
@cuda.jit
def f(r, x):
r[0] = len(x)
x = tuple()
r = np.ones(1, dtype=np.int64)
f[1, 1](r, x)
self.assertEqual(r[0], 0)
def test_tuple_of_empty_tuples(self):
@cuda.jit
def f(r, x):
r[0] = len(x)
r[1] = len(x[0])
x = ((), (), ())
r = np.ones(2, dtype=np.int64)
f[1, 1](r, x)
self.assertEqual(r[0], 3)
self.assertEqual(r[1], 0)
def test_tuple_of_tuples(self):
@cuda.jit
def f(r, x):
r[0] = len(x)
r[1] = len(x[0])
r[2] = len(x[1])
r[3] = len(x[2])
r[4] = x[1][0]
r[5] = x[1][1]
r[6] = x[2][0]
r[7] = x[2][1]
r[8] = x[2][2]
x = ((), (5, 6), (8, 9, 10))
r = np.ones(9, dtype=np.int64)
f[1, 1](r, x)
self.assertEqual(r[0], 3)
self.assertEqual(r[1], 0)
self.assertEqual(r[2], 2)
self.assertEqual(r[3], 3)
self.assertEqual(r[4], 5)
self.assertEqual(r[5], 6)
self.assertEqual(r[6], 8)
self.assertEqual(r[7], 9)
self.assertEqual(r[8], 10)
def test_tuple_of_tuples_and_scalars(self):
@cuda.jit
def f(r, x):
r[0] = len(x)
r[1] = len(x[0])
r[2] = x[0][0]
r[3] = x[0][1]
r[4] = x[0][2]
r[5] = x[1]
x = ((6, 5, 4), 7)
r = np.ones(9, dtype=np.int64)
f[1, 1](r, x)
self.assertEqual(r[0], 2)
self.assertEqual(r[1], 3)
self.assertEqual(r[2], 6)
self.assertEqual(r[3], 5)
self.assertEqual(r[4], 4)
self.assertEqual(r[5], 7)
def test_tuple_of_arrays(self):
@cuda.jit
def f(x):
i = cuda.grid(1)
if i < len(x[0]):
x[0][i] = x[1][i] + x[2][i]
N = 10
x0 = np.zeros(N)
x1 = np.ones_like(x0)
x2 = x1 * 3
x = (x0, x1, x2)
f[1, N](x)
np.testing.assert_equal(x0, x1 + x2)
def test_tuple_of_array_scalar_tuple(self):
@cuda.jit
def f(r, x):
r[0] = x[0][0]
r[1] = x[0][1]
r[2] = x[1]
r[3] = x[2][0]
r[4] = x[2][1]
z = np.arange(2, dtype=np.int64)
x = (2 * z, 10, (4, 3))
r = np.zeros(5, dtype=np.int64)
f[1, 1](r, x)
self.assertEqual(r[0], 0)
self.assertEqual(r[1], 2)
self.assertEqual(r[2], 10)
self.assertEqual(r[3], 4)
self.assertEqual(r[4], 3)
| TestCudaArrayArg |
python | getsentry__sentry | src/sentry/migrations/1007_cleanup_failed_safe_deletes.py | {
"start": 207,
"end": 1933
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "1006_drop_legacy_incidentseen_incidentsubscription"),
]
operations = [
# Clean up tables that may not have been deleted due to missing
# historical_silo_assignments entries before the fix
SafeRunSQL(
sql="DROP TABLE IF EXISTS sentry_datasecrecywaiver CASCADE;",
reverse_sql=migrations.RunSQL.noop,
hints={"tables": ["sentry_datasecrecywaiver"]},
),
SafeRunSQL(
sql="DROP TABLE IF EXISTS sentry_dashboardwidgetsnapshot CASCADE;",
reverse_sql=migrations.RunSQL.noop,
hints={"tables": ["sentry_dashboardwidgetsnapshot"]},
),
]
| Migration |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/dms.py | {
"start": 4980,
"end": 6712
} | class ____(AwsBaseOperator[DmsHook]):
"""
Deletes AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsDeleteTaskOperator`
:param replication_task_arn: Replication task ARN
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields("replication_task_arn")
def __init__(self, *, replication_task_arn: str | None = None, **kwargs):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
def execute(self, context: Context):
"""
Delete AWS DMS replication task from Airflow.
:return: replication task arn
"""
self.hook.delete_replication_task(replication_task_arn=self.replication_task_arn)
self.log.info("DMS replication task(%s) has been deleted.", self.replication_task_arn)
| DmsDeleteTaskOperator |
python | sqlalchemy__sqlalchemy | test/orm/test_instrumentation.py | {
"start": 11583,
"end": 12145
} | class ____(fixtures.ORMTest):
"""Check that Events.load is not hit in regular attributes operations."""
def test_basic(self):
import pickle
global A
class A:
pass
def canary(instance):
assert False
try:
instrumentation.register_class(A)
manager = instrumentation.manager_of_class(A)
event.listen(manager, "load", canary)
a = A()
p_a = pickle.dumps(a)
pickle.loads(p_a)
finally:
del A
| OnLoadTest |
python | numpy__numpy | benchmarks/benchmarks/bench_reduce.py | {
"start": 2682,
"end": 2834
} | class ____(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
| SmallReduction |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_compiler.py | {
"start": 59023,
"end": 65020
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mysql.dialect()
match_table = table(
"user",
column("firstname", String),
column("lastname", String),
)
@testing.combinations(
(
lambda title: title.match("somstr", mysql_boolean_mode=False),
"MATCH (matchtable.title) AGAINST (%s)",
),
(
lambda title: title.match(
"somstr",
mysql_boolean_mode=False,
mysql_natural_language=True,
),
"MATCH (matchtable.title) AGAINST (%s IN NATURAL LANGUAGE MODE)",
),
(
lambda title: title.match(
"somstr",
mysql_boolean_mode=False,
mysql_query_expansion=True,
),
"MATCH (matchtable.title) AGAINST (%s WITH QUERY EXPANSION)",
),
(
lambda title: title.match(
"somstr",
mysql_boolean_mode=False,
mysql_natural_language=True,
mysql_query_expansion=True,
),
"MATCH (matchtable.title) AGAINST "
"(%s IN NATURAL LANGUAGE MODE WITH QUERY EXPANSION)",
),
)
def test_match_expression_single_col(self, case, expected):
matchtable = table("matchtable", column("title", String))
title = matchtable.c.title
expr = case(title)
self.assert_compile(expr, expected)
@testing.combinations(
(
lambda expr: expr,
"MATCH (user.firstname, user.lastname) AGAINST (%s)",
),
(
lambda expr: expr.in_boolean_mode(),
"MATCH (user.firstname, user.lastname) AGAINST "
"(%s IN BOOLEAN MODE)",
),
(
lambda expr: expr.in_natural_language_mode(),
"MATCH (user.firstname, user.lastname) AGAINST "
"(%s IN NATURAL LANGUAGE MODE)",
),
(
lambda expr: expr.with_query_expansion(),
"MATCH (user.firstname, user.lastname) AGAINST "
"(%s WITH QUERY EXPANSION)",
),
(
lambda expr: (
expr.in_natural_language_mode().with_query_expansion()
),
"MATCH (user.firstname, user.lastname) AGAINST "
"(%s IN NATURAL LANGUAGE MODE WITH QUERY EXPANSION)",
),
)
def test_match_expression_multiple_cols(self, case, expected):
firstname = self.match_table.c.firstname
lastname = self.match_table.c.lastname
expr = match(firstname, lastname, against="Firstname Lastname")
expr = case(expr)
self.assert_compile(expr, expected)
@testing.combinations(
(bindparam("against_expr"), "%s"),
(
column("some col") + column("some other col"),
"`some col` + `some other col`",
),
(column("some col") + bindparam("against_expr"), "`some col` + %s"),
)
def test_match_expression_against_expr(self, against, expected_segment):
firstname = self.match_table.c.firstname
lastname = self.match_table.c.lastname
expr = match(firstname, lastname, against=against)
expected = (
"MATCH (user.firstname, user.lastname) AGAINST (%s)"
% expected_segment
)
self.assert_compile(expr, expected)
def test_cols_required(self):
assert_raises_message(
exc.ArgumentError,
"columns are required",
match,
against="Firstname Lastname",
)
@testing.combinations(
(True, False, True), (True, True, False), (True, True, True)
)
def test_invalid_combinations(
self, boolean_mode, natural_language, query_expansion
):
firstname = self.match_table.c.firstname
lastname = self.match_table.c.lastname
assert_raises_message(
exc.ArgumentError,
"columns are required",
match,
against="Firstname Lastname",
)
expr = match(
firstname,
lastname,
against="Firstname Lastname",
in_boolean_mode=boolean_mode,
in_natural_language_mode=natural_language,
with_query_expansion=query_expansion,
)
msg = (
"Invalid MySQL match flags: "
"in_boolean_mode=%s, "
"in_natural_language_mode=%s, "
"with_query_expansion=%s"
) % (boolean_mode, natural_language, query_expansion)
assert_raises_message(
exc.CompileError,
msg,
expr.compile,
dialect=self.__dialect__,
)
def test_match_operator(self):
matchtable = table("matchtable", column("title", String))
self.assert_compile(
matchtable.c.title.match("somstr"),
"MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)",
)
def test_literal_binds(self):
expr = literal("x").match(literal("y"))
self.assert_compile(
expr,
"MATCH ('x') AGAINST ('y' IN BOOLEAN MODE)",
literal_binds=True,
)
def test_char_zero(self):
"""test #9544"""
t1 = Table(
"sometable",
MetaData(),
Column("a", CHAR(0)),
Column("b", VARCHAR(0)),
Column("c", String(0)),
Column("d", NVARCHAR(0)),
Column("e", NCHAR(0)),
Column("f", TEXT(0)),
Column("g", Text(0)),
Column("h", BLOB(0)),
Column("i", LargeBinary(0)),
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (a CHAR(0), b VARCHAR(0), "
"c VARCHAR(0), d NATIONAL VARCHAR(0), e NATIONAL CHAR(0), "
"f TEXT(0), g TEXT(0), h BLOB(0), i BLOB(0))",
)
| MatchExpressionTest |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/session_debug_testlib.py | {
"start": 60881,
"end": 64715
} | class ____(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variable_v1.VariableV1(30.0, name="v")
constants = []
for i in range(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in range(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in range(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in range(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in range(len(executor_step_indices) - 1):
self.assertEqual(
executor_step_indices[i][1] + 1, executor_step_indices[i + 1][1]
)
# Assert that session_run_index increase monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in range(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
| DebugConcurrentRunCallsTest |
python | OmkarPathak__pygorithm | tests/test_geometry.py | {
"start": 62026,
"end": 95476
} | class ____(unittest.TestCase):
"""
It is suggested that you follow along these tests with the images
at imgs/test_geometry/test_extrapolated_intersection. All image
references will be relative to that folder and will be referencing
the .py file, whereas the actual images are in the out/
folder with the the full prefix and image file type.
The file names are prefixed with a unique 2 character alphabetical
code per test function, which is the prefix for the matplotlib file,
followed by a unique 2 character numeric code to identify each image,
followed by an underscore and the name of the test function they are
referenced in. In the code they are just referenced with the first 4
characters of the image file name.
Note that you can open up the interactive matplotlib plot by calling
the corresponding python file with py, and to export the 4 image files
to their appropriate location you just pass the "--export" flag to the
python file.
"""
def setUp(self):
self.pt = vector2.Vector2
self.ln = line2.Line2
self.extr_intr = extrapolated_intersection
random.seed()
# calculate_one_moving_point_and_one_stationary_line
def _calc_one_moving_point_one_stat_line_fuzzer(self, pt, vel, line):
fn = self.extr_intr.calculate_one_moving_point_and_one_stationary_line
offset = self.pt(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
newline = self.ln(line.start - offset, line.end - offset)
intr, dist = fn(pt, vel, newline, offset)
return intr, dist, offset
def test_point_line_no_intr(self):
fn = self._calc_one_moving_point_one_stat_line_fuzzer
# aa01 (see class comment!)
intr, dist, offset = fn(self.pt(1, 1), self.pt(1, 0), self.ln(self.pt(6, 2), self.pt(2, 4)))
self.assertFalse(intr, msg=repr(offset))
self.assertIsNone(dist, msg=repr(offset))
# aa02
intr, dist, offset = fn(self.pt(1, 1), self.pt(0, 1), self.ln(self.pt(6, 2), self.pt(2, 4)))
self.assertFalse(intr, msg=repr(offset))
self.assertIsNone(dist, msg=repr(offset))
# aa03
intr, dist, offset = fn(self.pt(4, 1), self.pt(-3, 3).normalize(), self.ln(self.pt(2, 4), self.pt(6, 4)))
self.assertFalse(intr, msg=repr(offset))
self.assertIsNone(dist, msg=repr(offset))
# aa04
intr, dist, offset = fn(self.pt(2, 1), self.pt(4, 3).normalize(), self.ln(self.pt(1, 2), self.pt(5, 4)))
self.assertFalse(intr, msg=repr(offset))
self.assertIsNone(dist, msg=repr(offset))
def test_point_line_touching(self):
fn = self._calc_one_moving_point_one_stat_line_fuzzer
# ab01
intr, dist, offset = fn(self.pt(1, 1), self.pt(1, 3).normalize(), self.ln(self.pt(2, 4), self.pt(6, 2)))
self.assertTrue(intr, repr(offset))
self.assertAlmostEqual(self.pt(1, 3).magnitude(), dist, msg=repr(offset))
# ab02
intr, dist, offset = fn(self.pt(2, 1), self.pt(4, 1).normalize(), self.ln(self.pt(2, 0), self.pt(6, 2)))
self.assertTrue(intr, repr(offset))
self.assertAlmostEqual(self.pt(4, 1).magnitude(), dist, msg=repr(offset))
# ab03
intr, dist, offset = fn(self.pt(2, 1), self.pt(0, -1), self.ln(self.pt(2, 0), self.pt(6, 2)))
self.assertTrue(intr, msg=repr(offset))
self.assertAlmostEqual(1, dist, msg=repr(offset))
# ab04
intr, dist, offset = fn(self.pt(6.25, 3), self.pt(-4.25, -3).normalize(), self.ln(self.pt(2, 0), self.pt(6, 2)))
self.assertTrue(intr, msg=repr(offset))
self.assertAlmostEqual(self.pt(4.25, 3).magnitude(), dist, msg=repr(offset))
def test_point_line_touching_at_start(self):
fn = self._calc_one_moving_point_one_stat_line_fuzzer
# ac01
intr, dist, offset = fn(self.pt(4, 1), self.pt(-1, 1).normalize(), self.ln(self.pt(2, 0), self.pt(6, 2)))
self.assertTrue(intr, msg=repr(offset))
self.assertEqual(0, dist, msg=repr(offset))
# ac02
intr, dist, offset = fn(self.pt(2, 2), self.pt(-1, 0), self.ln(self.pt(2, 2), self.pt(6, 2)))
self.assertTrue(intr, msg=repr(offset))
self.assertEqual(0, dist, msg=repr(offset))
# ac03
intr, dist, offset = fn(self.pt(3, 1), self.pt(1, 1).normalize(), self.ln(self.pt(3, 0), self.pt(3, 4)))
self.assertTrue(intr, msg=repr(offset))
self.assertEqual(0, dist, msg=repr(offset))
# ac04
intr, dist, offset = fn(self.pt(3, 4), self.pt(-1, 0), self.ln(self.pt(3, 0), self.pt(3, 4)))
self.assertTrue(intr, msg=repr(offset))
self.assertEqual(0, dist, msg=repr(offset))
def test_point_line_intr_later(self):
fn = self._calc_one_moving_point_one_stat_line_fuzzer
# ad01
intr, dist, offset = fn(self.pt(0, 2), self.pt(3, -1).normalize(), self.ln(self.pt(3, 0), self.pt(3, 4)))
self.assertTrue(intr, msg=repr(offset))
self.assertAlmostEqual(self.pt(3, -1).magnitude(), dist, msg=repr(offset))
# ad02
intr, dist, offset = fn(self.pt(6, 2), self.pt(-1, 0), self.ln(self.pt(3, 0), self.pt(3, 4)))
self.assertTrue(intr, msg=repr(offset))
self.assertAlmostEqual(3, dist, msg=repr(offset))
# ad03
intr, dist, offset = fn(self.pt(6, 2), self.pt(-1, 0), self.ln(self.pt(1, 1), self.pt(5, 3)))
self.assertTrue(intr, msg=repr(offset))
self.assertAlmostEqual(3, dist, msg=repr(offset))
# ad04
intr, dist, offset = fn(self.pt(6, 4), self.pt(-3, -1).normalize(), self.ln(self.pt(1, 1), self.pt(5, 3)))
self.assertTrue(intr, msg=repr(offset))
self.assertAlmostEqual(self.pt(-3, -1).magnitude(), dist, msg=repr(offset))
# calculate_one_moving_line_and_one_stationary_line
def _calc_one_moving_line_one_stat_line_fuzzer(self, line1tup, vel1tuporvec, _line2tup):
fn = self.extr_intr.calculate_one_moving_line_and_one_stationary_line
line1 = self.ln(self.pt(line1tup[0]), self.pt(line1tup[1]))
vel1 = self.pt(vel1tuporvec)
_line2 = self.ln(self.pt(_line2tup[0]), self.pt(_line2tup[1]))
offset1 = self.pt(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
offset2 = self.pt(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
newline1 = self.ln(line1.start - offset1, line1.end - offset1)
newline2 = self.ln(_line2.start - offset2, _line2.end - offset2)
intr, dist = fn(newline1, offset1, vel1, newline2, offset2)
return intr, dist, "\n\nline1={}\nvel1={}\nline2={}\noffset1={}\noffset2={}".format(line1, vel1, _line2, repr(offset1), repr(offset2))
def test_line_line_no_intr(self):
fn = self._calc_one_moving_line_one_stat_line_fuzzer
# ae01
intr, dist, msg = fn(((1, 4), (1, 3)), (1, 0), ((1, 1), (3, 2)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# ae02
intr, dist, msg = fn(((1, 3), (2, 4)), self.pt(1, -1).normalize(), ((1, 0.5), (3, 0.5)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# ae03
intr, dist, msg = fn(((1, 3), (2, 4)), self.pt(1, -1).normalize(), ((4, 3), (6, 4)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# ae04
intr, dist, msg = fn(((1, 3), (2, 3)), self.pt(1, -1).normalize(), ((0, 4), (3, 3)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
def test_line_line_touching(self):
fn = self._calc_one_moving_line_one_stat_line_fuzzer
# af01
intr, dist, msg = fn(((1, 3), (2, 3)), self.pt(1, -1).normalize(), ((3, 3), (5, 0)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# af02
intr, dist, msg = fn(((1, 1), (2, 1)), self.pt(1, 1).normalize(), ((3, 3), (3, 2)))
self.assertTrue(intr, msg=msg)
self.assertAlmostEqual(self.pt(1, 1).magnitude(), dist, msg=msg)
# af03
intr, dist, msg = fn(((1, 1), (2, 1)), self.pt(1, 1).normalize(), ((2, 3), (3, 3)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# af04
intr, dist, msg = fn(((1, 1), (2, 1)), (0, 1), ((2, 3), (3, 3)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
def test_line_line_touching_at_start(self):
fn = self._calc_one_moving_line_one_stat_line_fuzzer
# ag01
intr, dist, msg = fn(((1, 1), (2, 1)), (0, 1), ((2, 1), (3, 0)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# ag02
intr, dist, msg = fn(((1, 1), (1, 3)), (1, 0), ((1, 2), (2, 2)))
self.assertTrue(intr, msg=msg)
self.assertEqual(0, dist, msg=msg)
# ag03
intr, dist, msg = fn(((1, 1), (2, 0)), (1, 0), ((0, 1), (1.5, 0.5)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
# ag04
intr, dist, msg = fn(((5, 4), (6, 3)), (-1, -1), ((5.5, 3.5), (6, 4)))
self.assertFalse(intr, msg=msg)
self.assertIsNone(dist, msg=msg)
def test_line_line_intr_later(self):
fn = self._calc_one_moving_line_one_stat_line_fuzzer
# ah01
intr, dist, msg = fn(((5, 4), (6, 3)), (-1, -1), ((3.5, 1.5), (3.5, 0)))
self.assertTrue(intr, msg=msg)
self.assertAlmostEqual(self.pt(-2, -2).magnitude(), dist, msg=msg)
# ah02
intr, dist, msg = fn(((5, 4), (5, 3)), (-1, -1), ((3, 3), (3, 0)))
self.assertTrue(intr, msg=msg)
self.assertAlmostEqual(self.pt(-2, -2).magnitude(), dist, msg=msg)
# ah03
intr, dist, msg = fn(((5, 4), (5, 3)), (-1, 0), ((1, 1), (3, 3.5)))
self.assertTrue(intr, msg=msg)
self.assertAlmostEqual(2, dist, msg=msg)
# ah04
intr, dist, msg = fn(((0, 1), (1, 0)), (0.25, 0.5), ((2, 1), (2, 4)))
self.assertTrue(intr, msg=msg)
self.assertAlmostEqual(self.pt(1, 2).magnitude(), dist, smg=msg)
# calculate_one_moving_and_one_stationary
def _calc_one_moving_one_stat_fuzzer(self, poly1tup, vel1tuporvec, poly2tup):
fn = self.extr_intr.calculate_one_moving_and_one_stationary
poly1 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly1tup))
vel1 = vector2.Vector2(vel1tuporvec)
poly2 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly2tup))
offset1 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
offset2 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
newpoly1 = polygon2.Polygon2(list(p - offset1 for p in poly1.points))
newpoly2 = polygon2.Polygon2(list(p - offset2 for p in poly2.points))
msg = "\n\npoly1={}\n\npoly2={}\n\nvel1={}\n\noffset1={}\n\noffset2={}".format(repr(poly1), repr(poly2), repr(vel1), repr(offset1), repr(offset2))
intr = fn(newpoly1, offset1, vel1, newpoly2, offset2)
return intr, msg
def test_one_moving_one_stationary_no_intr(self):
fn = self._calc_one_moving_one_stat_fuzzer
# ai01
intr, msg = fn(((0, 1), (1, 2), (2, 1), (1, 0)), (0, 1), ((3, 1), (3, 2), (4, 1)))
self.assertFalse(intr, msg=msg)
# ai02
intr, msg = fn(((0, 1), (1, 2), (2, 1), (1, 0)), self.pt(1, 2).normalize(), ((3, 1), (3, 2), (4, 1)))
self.assertFalse(intr, msg=msg)
# ai03
intr, msg = fn(((4, 4), (5, 3.5), (5.5, 2.5), (4, 3)), (-1, 0), ((3, 1), (3, 2), (4, 1)))
self.assertFalse(intr, msg=msg)
# ai04
intr, msg = fn(((3, 2), (3, 1), (4, 1)), (1, 0), ((4, 4), (5, 3.5), (5.5, 2.5), (4, 3)))
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_touching(self):
fn = self._calc_one_moving_one_stat_fuzzer
# aj01
intr, msg = fn(((4, 4), (5, 3.5), (5.5, 2.5), (4, 2), (3, 3)), (-1, 0), ((1, 2), (2, 1), (1, 0), (0, 1)))
self.assertFalse(intr, msg=msg)
# aj02
intr, msg = fn(((4, 4), (5, 3.5), (5.5, 2.5), (4, 2), (3, 3)), self.pt(-1, -2).normalize(), ((1, 2), (2, 1), (1, 0), (0, 1)))
self.assertFalse(intr, msg=msg)
# aj03
intr, msg = fn(((0, 1), (1, 1), (1, 0), (0, 0)), self.pt(1, 2).normalize(), ((2, 2), (3, 3), (4, 2)))
self.assertFalse(intr, msg=msg)
# aj04
intr, msg = fn(((0, 1), (1, 1), (1, 0), (0, 0)), self.pt(4, 1).normalize(), ((2, 2), (3, 3), (4, 2)))
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_intr_at_start(self):
fn = self._calc_one_moving_one_stat_fuzzer
# ak01
intr, msg = fn(((0, 1), (1, 1), (1, 0), (0, 0)), (0, 1), ((1, 1), (2, 2), (3, 1)))
self.assertTrue(intr, msg=msg)
# ak02
intr, msg = fn(((1, 1), (2, 2), (3, 1)), (-1, 1), ((2.5, 0.5), (4.5, 2.5), (5, 1), (4, 0.5)))
self.assertTrue(intr, msg=msg)
# ak03
intr, msg = fn(((1, 1), (2, 2), (3, 1)), (-1, -1), ((2.5, 0.5), (4.5, 2.5), (5, 1), (4, 0.5)))
self.assertTrue(intr, msg=msg)
# ak04
intr, msg = fn(((2, 2), (3, 1), (2, 0)), (-1, 0), ((3, 2), (4.5, 2.5), (5, 1), (4, 0.5), (2.5, 0.5)))
self.assertTrue(intr, msg=msg)
def test_one_moving_one_stationary_intr_later(self):
fn = self._calc_one_moving_one_stat_fuzzer
# al01
intr, msg = fn(((5, 3), (6, 2), (4, 2)), self.pt(-2, -1).normalize(), ((2, 2), (3, 1), (2, 0)))
self.assertTrue(intr, msg=msg)
# al02
intr, msg = fn(((2.5, 4), (4, 4), (5, 3), (2.5, 3)), (0, 1), ((2, 2), (3, 1), (2, 0), (0, 1)))
self.assertTrue(intr, msg=msg)
# al03
intr, msg = fn(((1, 4), (2, 4), (2, 3), (1, 3)), (-1, -2), ((0, 1), (2, 2), (3, 1), (2, 0)))
self.assertTrue(intr, msg=msg)
# al04
intr, msg = fn(((5, 2.5), (6, 2.5), (4, 1.25), (4, 1.75)), (-5, 0), ((0, 1), (2, 2), (3, 1), (2, 0)))
self.assertTrue(intr, msg=msg)
# calculate_one_moving_one_stationary_distancelimit
def _calc_one_moving_one_stat_distlimit_fuzzer(self, poly1tup, vel1tuporvec, poly2tup, distlimit):
fn = self.extr_intr.calculate_one_moving_one_stationary_distancelimit
poly1 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly1tup))
vel1 = vector2.Vector2(vel1tuporvec)
poly2 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly2tup))
offset1 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
offset2 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
vel1scalar = random.uniform(0.1, 10)
newpoly1 = polygon2.Polygon2(list(p - offset1 for p in poly1.points))
newpoly2 = polygon2.Polygon2(list(p - offset2 for p in poly2.points))
newvel1 = vel1 * vel1scalar
msg = "\n\npoly1={}\n\npoly2={}\n\nvel1={}, distlimit={}\n\nvel1scalar={}\n\noffset1={}\n\noffset2={}".format(repr(poly1), repr(poly2), repr(vel1), repr(distlimit), repr(vel1scalar), repr(offset1), repr(offset2))
intr = fn(newpoly1, offset1, newvel1, newpoly2, offset2, distlimit)
return intr, msg
def test_one_moving_one_stationary_distlimit_no_intr(self):
fn = self._calc_one_moving_one_stat_distlimit_fuzzer
# am01
intr, msg = fn(((0, 3), (1, 3), (1, 2), (0, 2)), (1, 0), ((2, 0), (3, 1), (4, 0)), 4)
self.assertFalse(intr, msg=msg)
# am02
intr, msg = fn(((1, 4), (2, 4), (2, 3), (1, 3)), (5, -3), ((0, 1), (2, 2), (3, 1), (2, 0)), self.pt(5, -3).magnitude())
self.assertFalse(intr, msg=msg)
# am03
intr, msg = fn(((1, 3), (2, 4), (3, 4), (3, 3)), (3, -2), ((0, 1), (2, 2), (3, 0), (2, 0)), self.pt(3, -2).magnitude())
self.assertFalse(intr, msg=msg)
# am04
intr, msg = fn(((4, 1.75), (5, 2.5), (6, 2.5), (4, 1.25)), (-2, 1), ((4, 1.75), (5, 2.5), (6, 2.5), (4, 1.25)), self.pt(-2, 1).magnitude())
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_distlimit_touching(self):
fn = self._calc_one_moving_one_stat_distlimit_fuzzer
# an01
intr, msg = fn(((0, 3), (1, 3), (1, 2), (0, 2)), (5, -1.25), ((3, 1), (4, 1), (4, 0), (3, 0)), self.pt(5, -1.25).magnitude())
self.assertFalse(intr, msg=msg)
# an02
intr, msg = fn(((1, 3), (2, 3), (2, 2), (1, 2)), (4, 0), ((1, 0), (2, 1), (4, 2), (5, 0)), 4)
self.assertFalse(intr, msg=msg)
# an03
intr, msg = fn(((1, 3), (2, 4), (3, 4), (3, 2)), (3, -2), ((0, 1), (2.5, 2), (3, 0), (2, 0)), self.pt(3, -2).magnitude())
self.assertFalse(intr, msg=msg)
# an04
intr, msg = fn(((0, 0), (1, 2), (2, 1)), (3, 3), ((3, 2), (5, 3), (5, 1)), self.pt(3, 3).magnitude())
self.assertFalse(intr, msg=msg)
    def test_one_moving_one_stationary_distlimit_intr_at_start(self):
        """ao01-ao04: polygons overlap (or abut) at t=0 while a distance
        limit is in force."""
        fn = self._calc_one_moving_one_stat_distlimit_fuzzer
        # ao01
        # NOTE(review): asserts False even though the two squares share the
        # edge y=1 at t=0 -- presumably pure edge contact does not count as
        # an intersection here; confirm against the implementation.
        intr, msg = fn(((3, 3), (4, 3), (4, 1), (3, 1)), (2, 0), ((3, 1), (4, 1), (4, 0), (3, 0)), 2)
        self.assertFalse(intr, msg=msg)
        # ao02: a slight downward drift turns the edge contact into overlap
        intr, msg = fn(((3, 3), (4, 3), (4, 1), (3, 1)), (2, -0.25), ((3, 1), (4, 1), (4, 0), (3, 0)), self.pt(2, -0.25).magnitude())
        self.assertTrue(intr, msg=msg)
        # ao03
        intr, msg = fn(((1, 1), (2, 4), (3, 4), (3, 2)), (-1, 2), ((0, 1), (2.5, 2), (3, 0), (2, 0)), self.pt(-1, 2).magnitude())
        self.assertTrue(intr, msg=msg)
        # ao04
        intr, msg = fn(((4, 0), (3, 2), (5, 2)), (0, 1), ((3, 0), (5, 1), (5, -1)), 3)
        self.assertTrue(intr, msg=msg)
def test_one_moving_one_stationary_distlimit_intr_later(self):
fn = self._calc_one_moving_one_stat_distlimit_fuzzer
# ap01
intr, msg = fn(((2, 3), (3, 3), (3, 2), (2, 2)), (5, 4), ((3, 5), (4, 5), (4, 4), (3, 4)), self.pt(5, 4).magnitude())
self.assertTrue(intr, msg=msg)
# ap02
intr, msg = fn(((8, 5), (7, 3), (6, 3)), (-4, -3), ((4, 3), (4.5, 3.5), (7, 1), (6, 0)), self.pt(-4, -3).magnitude())
self.assertTrue(intr, msg=msg)
# ap03
intr, msg = fn(((4, 3), (6, 3), (6, 2), (5, 1)), (-1, 0), ((4, 1.25), (5, 0), (3, 0)), 3)
self.assertTrue(intr, msg=msg)
# ap04
intr, msg = fn(((2, 1), (6, 1), (5, 0)), (0, 1), ((3, 3), (4, 3), (4, 2), (3, 2)), 4)
self.assertTrue(intr, msg=msg)
def test_one_moving_one_stationary_distlimit_touch_at_limit(self):
fn = self._calc_one_moving_one_stat_distlimit_fuzzer
# aq01
intr, msg = fn(((0, 1), (1, 1), (1, 0), (0, 0)), (4, 3), ((3, 5), (4, 5), (4, 4), (3, 4)), self.pt(4, 3).magnitude())
self.assertFalse(intr, msg=msg)
# aq02
intr, msg = fn(((5, 6), (4, 3), (4, 4)), (2, -1.5), ((1, 3), (2, 3.5), (7, 1), (6, 0)), self.pt(2, -1.5).magnitude())
self.assertFalse(intr, msg=msg)
# aq03
intr, msg = fn(((4, 3), (6, 3), (6, 2), (5, 1)), (-1, 0), ((0, 3), (1, 3), (2, 1), (0, 1)), 3)
self.assertFalse(intr, msg=msg)
# aq04
intr, msg = fn(((2, 1), (6, 1), (5, 0)), (0, 1), ((3, 4), (4, 4), (4, 3), (3, 3)), 2)
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_distlimit_intr_after_limit(self):
fn = self._calc_one_moving_one_stat_distlimit_fuzzer
# ar01
intr, msg = fn(((0, 1), (1, 1), (1, 0), (0, 0)), (4, 3), ((5.5, 5.5), (6.5, 5.5), (6.5, 4.5), (5.5, 4.5)), self.pt(4, 3).magnitude())
self.assertFalse(intr, msg=msg)
# ar02
intr, msg = fn(((5, 6), (4, 3), (4, 4)), (2, -1.5), ((1, 3), (2, 3.5), (7, 1), (6, 0)), 1.5)
self.assertFalse(intr, msg=msg)
# ar03
intr, msg = fn(((4, 3), (6, 3), (6, 2), (5, 1)), (-1, 0), ((0, 3), (1, 3), (2, 1), (0, 1)), 2.5)
self.assertFalse(intr, msg=msg)
# ar04
intr, msg = fn(((2, 1), (6, 1), (5, 0)), (0, 1), ((3, 4), (4, 4), (4, 3), (3, 3)), 1.75)
self.assertFalse(intr, msg=msg)
# calculate_one_moving_one_stationary_along_path
    def _calc_one_moving_one_stat_along_path_fuzzer(self, poly1tup, pos1tuporvec, pos2tuporvec, poly2tup, reverse=False):
        """Run calculate_one_moving_one_stationary_along_path with both
        polygons expressed relative to random offsets.

        poly1 moves from pos1 to pos2; poly2 is stationary. When ``reverse``
        is True both vertex lists are reversed first (some fixtures were
        authored with the opposite winding order).

        Returns (intersects, diagnostic message).
        """
        # i generated a few polygons in the wrong order when making these tests
        if reverse:
            poly1tup = list(p for p in poly1tup)
            poly1tup.reverse()
            poly2tup = list(p for p in poly2tup)
            poly2tup.reverse()
        fn = self.extr_intr.calculate_one_moving_one_stationary_along_path
        poly1 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly1tup))
        pos1 = vector2.Vector2(pos1tuporvec)
        pos2 = vector2.Vector2(pos2tuporvec)
        poly2 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly2tup))
        offset1 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
        offset2 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
        # poly1's offset is folded into the path positions rather than passed
        # separately: vertices are shifted by -offset1 while BOTH path
        # endpoints are shifted by +offset1, so vertex + position reproduces
        # the original absolute placement at every point on the path.
        newpoly1 = polygon2.Polygon2(list(p - offset1 for p in poly1.points))
        newpoly2 = polygon2.Polygon2(list(p - offset2 for p in poly2.points))
        newpos1 = pos1 + offset1
        newpos2 = pos2 + offset1
        msg = "\n\npoly1={}\n\npoly2={}\n\npos1={}, pos2={}\n\noffset1={}\n\noffset2={}".format(repr(poly1), repr(poly2), repr(pos1), repr(pos2), repr(offset1), repr(offset2))
        intr = fn(newpoly1, newpos1, newpos2, newpoly2, offset2)
        return intr, msg
    # I started using rand_moving_stationary_generator to create these. It still takes
    # a while because that generator doesn't guarantee valid polygons and certainly won't
    # find the situation we're testing for without some work, but it's still faster.
def test_one_moving_one_stationary_along_path_no_intr(self):
fn = self._calc_one_moving_one_stat_along_path_fuzzer
# as01
intr, msg = fn(((0, 0), (0, 1), (1, 1), (1, 0)), (0, 0), (4, 3), ((3, 1), (4, 1), (4, 0), (3, 0)))
self.assertFalse(intr, msg=msg)
# as02
intr, msg = fn(((11, 5), (8, 8), (7, 7), (6, 3), (9, 3)), (0, 0), (-1, -3), ((3.5, 8.5), (1.5, 8.5), (-0.5, 7.5), (0.5, 3.5), (1.5, 2.5), (4.5, 2.5), (5.5, 6.5)), reverse=True)
self.assertFalse(intr, msg=msg)
# as03
intr, msg = fn(((0.5, 9.0), (-1.5, 8.0), (-1.5, 6.0), (1.5, 5.0), (2.5, 5.0), (2.5, 9.0)), (0, 0), (0, 5), ((7.0, 6.0), (4.0, 5.0), (4.0, 3.0), (6.0, 2.0), (8.0, 3.0)), reverse=True)
self.assertFalse(intr, msg=msg)
# as04
intr, msg = fn(((5.5, 4.5), (3.5, -1.5), (9.5, -1.5), (10.5, 0.5)), (0, 0), (-4, 0), ((7.5, 8.5), (6.5, 5.5), (7.5, 4.5), (9.5, 4.5), (10.5, 7.5)), reverse=True)
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_along_path_touching(self):
fn = self._calc_one_moving_one_stat_along_path_fuzzer
# at01
intr, msg = fn(((3, 10), (2, 10), (1, 8), (2, 6), (5, 6), (7, 8)), (0, 0), (8, 0), ((10, 5), (8, 6), (6, 5), (6, 4), (7, 2), (10, 4)), reverse=True)
self.assertFalse(intr, msg=msg)
# at02
intr, msg = fn(((5, 5), (4, 5), (2, 0), (4, -1), (6, 0)), (0, 0), (-5, 0), ((2, 11), (-2, 8), (2, 5), (3, 6), (3, 11)), reverse=True)
self.assertFalse(intr, msg=msg)
# at03
intr, msg = fn(((9.5, 8.5), (8.5, 7.5), (9.5, 5), (10.5, 7)), (0, 0), (-9, -9), ((2, 5), (-1, 5), (-2, 3), (2, 1), (3, 2)), reverse=True)
self.assertFalse(intr, msg=msg)
# at04
intr, msg = fn(((4.5, 4), (0.5, 2), (0.5, 1), (0.5, 0), (2.5, -2), (3.5, -2), (5.5, -1)), (0, 0), (6.7492919018596025, 4.29500393754702), ((8, 8.5), (5, 9.5), (4, 8.5), (6, 5.5)), reverse=True)
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_along_path_intr_at_start(self):
fn = self._calc_one_moving_one_stat_along_path_fuzzer
# au01
intr, msg = fn(((5, 3.5), (5, 2.5), (3, -0.5), (-2, 0.5), (-3, 2.5), (-2, 4.5), (0, 6.5)), (0, 0), (9, 2), ((6.5, 6.5), (9.5, 0.5), (3.5, -0.5), (1.5, 2.5), (3.5, 6.5)))
self.assertTrue(intr, msg=msg)
# au02
intr, msg = fn(((6.5, 5.5), (4.5, 3.5), (2.5, 6.5), (2.5, 7.5), (6.5, 6.5)), (0, 0), (10, -5), ((6, 2.5), (1, -1.5), (-2, 2.5), (-2, 2.5), (3, 6.5)))
self.assertTrue(intr, msg=msg)
# au03
intr, msg = fn(((10.5, 3.5), (8.5, 2.5), (5.5, 6.5), (9.5, 8.5), (11.5, 6.5), (11.5, 5.5)), (0, 0), (3, -7), ((12, 1), (11, 0), (9, -3), (8, -3), (5, -1), (5, 4), (9, 5)))
self.assertTrue(intr, msg=msg)
# au04
intr, msg = fn(((3.5, 6), (-0.5, 5), (-0.5, 7), (-0.5, 8), (1.5, 9), (1.5, 9), (3.5, 7)), (0, 0), (-6, 9), ((7, 6), (5, 6), (4, 6), (3, 7), (5, 10), (7, 9)))
self.assertTrue(intr, msg=msg)
def test_one_moving_one_stationary_along_path_intr_later(self):
fn = self._calc_one_moving_one_stat_along_path_fuzzer
# av01
intr, msg = fn(((-5, 9), (-8, 7), (-9, 7), (-8, 11), (-5, 10)), (0, 0), (15, 2), ((4, 15.5), (5, 12.5), (0, 11.5), (1, 16.5)))
self.assertTrue(intr, msg=msg)
# av02
intr, msg = fn(((4.5, -0.5), (3.5, -2.5), (1.5, -3.5), (-0.5, 0.5), (-0.5, 1.5), (1.5, 2.5)), (0, 0), (13, 3), ((8, 6), (10, 6), (10, 4), (8, 4)))
self.assertTrue(intr, msg=msg)
# av03
intr, msg = fn(((3, 17.5), (3, 16.5), (1, 15.5), (-1, 15.5), (-1, 18.5), (0, 19.5)), (0, 0), (-3, -6), ((14.5, 13), (14.5, 9), (12.5, 9), (11.5, 12), (12.5, 13)))
self.assertTrue(intr, msg=msg)
# av04
intr, msg = fn(((-5, 2.5), (-8, 0.5), (-9, 1.5), (-8, 4.5), (-6, 4.5)), (0, 0), (12, -10), ((6, -1.5), (5, -3.5), (2, -2.5), (3, 0.5)))
self.assertTrue(intr, msg=msg)
def test_one_moving_one_stationary_along_path_touch_at_end(self):
fn = self._calc_one_moving_one_stat_along_path_fuzzer
# aw01
intr, msg = fn(((-2, 0.5), (-3, -0.5), (-4, 0.5), (-3, 1.5)), (0, 0), (7, 1), ((9, 0), (8, 0), (5, 1), (5, 3), (7, 4), (9, 4)))
self.assertFalse(intr, msg=msg)
# aw02
intr, msg = fn(((11, -3.5), (9, -5.5), (6, -4.5), (6, -1.5), (9, -1.5)), (0, 0), (-7, 10), ((14, 8), (14, 7), (12, 7), (13, 9)))
self.assertFalse(intr, msg=msg)
# aw03
intr, msg = fn(((3, 0.5), (2, 1.5), (2, 2.5), (4, 2.5)), (0, 0), (-0.5, 5), ((-0.5, 5), (-1.5, 5), (-2.5, 7), (-0.5, 9), (1.5, 8), (1.5, 7)))
self.assertFalse(intr, msg=msg)
# aw04
intr, msg = fn(((15, 4.5), (15, 2.5), (13, 3.5), (13, 4.5), (14, 4.5)), (0, 0), (-1, -9), ((12, -5), (11, -9), (8, -9), (10, -4)))
self.assertFalse(intr, msg=msg)
def test_one_moving_one_stationary_along_path_intr_after_end(self):
fn = self._calc_one_moving_one_stat_along_path_fuzzer
# ax01
intr, msg = fn(((-6.5, 3.5), (-7.5, 0.5), (-10.5, 1.5), (-8.5, 4.5)), (0, 0), (5, 0), ((1, 2.5), (1, 0.5), (-1, 0.5), (-1, 1.5), (0, 2.5)))
self.assertFalse(intr, msg=msg)
# ax02
intr, msg = fn(((1.5, 3.5), (0.5, 2.5), (-0.5, 2.5), (-0.5, 3.5), (0.5, 4.5)), (0, 0), (10, 4), ((17.5, 6), (14.5, 6), (12.5, 8), (14.5, 10), (17.5, 9)))
self.assertFalse(intr, msg=msg)
# ax03
intr, msg = fn(((1, 2), (0, 3), (0, 5), (1, 6), (4, 4)), (0, 0), (7, 3), ((14, 7.5), (13, 8.5), (15, 9.5), (15, 8.5)))
self.assertFalse(intr, msg=msg)
# ax04
intr, msg = fn(((2.5, -4), (1.5, -6), (0.5, -6), (-1.5, -4), (-0.5, -2), (2.5, -3)), (0, 0), (6, -1), ((12, -7), (10, -5), (10, -4), (14, -4)))
self.assertFalse(intr, msg=msg)
# calculate_one_moving_many_stationary
def _calc_one_moving_many_stat_fuzzer(self, poly1tup, poly1vec, other_poly_tups_arr):
fn = self.extr_intr.calculate_one_moving_many_stationary
poly1 = polygon2.Polygon2(list(vector2.Vector2(p) for p in poly1tup))
vec1 = vector2.Vector2(poly1vec)
other_polys_arr = list(polygon2.Polygon2(list(vector2.Vector2(p) for p in poly)) for poly in other_poly_tups_arr)
offset1 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
other_offsets = list(random.uniform(-1000, 1000) for poly in other_polys_arr)
newpoly1 = polygon2.Polygon2(list(p + offset1 for p in poly1.points))
other_polys_offsets_comb = list((polygon2.Polygon2(list(p + other_offsets[i] for p in other_polys_arr[i].points)), other_offsets[i]) for i in range(len(other_offsets)))
msg = "poly1={}\nvec1={}\noffset1={}\n\nOTHER POLYGONS:\n\n"
for ind, tup in enumerate(other_polys_offsets_comb):
poly = tup[0]
offset = tup[1]
msg = msg + "poly{}={}\noffset{}={}".format(ind, poly, ind, offset)
result = fn(poly1, offset1, vec1, other_polys_offsets_comb)
return result, msg
def test_one_moving_many_stationary_no_intr(self):
fn = self._calc_one_moving_many_stat_fuzzer
# ay01
intr, msg = fn(((3, 3), (4, 3), (4, 4), (3, 4)), (1, 1), [
((6, 3), (7, 3), (7, 4), (6, 4)),
((3, 6), (3, 7), (4, 7), (4, 6)),
((4, 10), (6, 11), (6, 8), (2, 7))
])
self.assertFalse(intr, msg=msg)
# ay02
intr, msg = fn(((-1, -9.5), (-1, -5.5), (3, -5.5), (4, -7.5)), (1, 2), [
((6, -6), (8, -7), (7, -9)),
((0, 2), (2, 3), (1, 1)),
((-2, -2), (-2, -1), (-1, -1), (-1, -2)),
((8, -4), (8, -3), (7, -3), (7, -4))
])
self.assertFalse(intr, msg=msg)
# ay03
intr, msg = fn(((18.5, 3), (17.5, 3), (17.5, 5), (19.5, 5)), (-1, 3), [
((18, 13), (20, 14), (18.5, 11)),
((5, 5), (6, 2), (3, 3), (2, 4))
])
self.assertFalse(intr, msg=msg)
# ay04
intr, msg = fn(((-6, 2), (-6, 1), (-8, 0), (-8, 2)), (10, 0), [
((-7, 3), (-7, 4), (-6, 4), (-6, 3)),
((-6, 3), (-6, 4), (-5, 4), (-5, 3)),
((-5, 3), (-5, 4), (-4, 4), (-4, 3)),
((-4, 3), (-4, 4), (-3, 4), (-3, 3))
])
self.assertFalse(intr, msg=msg)
    # TODO(review): everything below is an unimplemented placeholder.
    # Fixture polygons (and the matching reference images -- see the class
    # docstring for the az../ba.. naming scheme) still need to be authored.
    def test_one_moving_many_stationary_touching(self):
        pass
    def test_one_moving_many_stationary_intr_at_start(self):
        pass
    def test_one_moving_many_stationary_intr_later(self):
        pass
    # calculate_one_moving_many_stationary_distancelimit
    def test_one_moving_many_stationary_distlimit_no_intr(self):
        pass
    def test_one_moving_many_stationary_distlimit_touching(self):
        pass
    def test_one_moving_many_stationary_distlimit_intr_at_start(self):
        pass
    def test_one_moving_many_stationary_distlimit_intr_later(self):
        pass
    def test_one_moving_many_stationary_distlimit_touch_at_limit(self):
        pass
    def test_one_moving_many_stationary_distlimit_intr_after_limit(self):
        pass
    # calculate_one_moving_many_stationary_along_path
    def test_one_moving_many_stationary_along_path_no_intr(self):
        pass
    def test_one_moving_many_stationary_along_path_touching(self):
        pass
    def test_one_moving_many_stationary_along_path_intr_at_start(self):
        pass
    def test_one_moving_many_stationary_along_path_intr_later(self):
        pass
    def test_one_moving_many_stationary_along_path_touch_at_limit(self):
        pass
    def test_one_moving_many_stationary_along_path_intr_after_limit(self):
        pass
    # calculate_two_moving
    def test_two_moving_no_intr(self):
        pass
    def test_two_moving_touching_miss(self):
        pass
    def test_two_moving_touching_miss_diff_vel(self):
        pass
    def test_two_moving_intr_ones_start_but_later(self):
        pass
    def test_two_moving_intr_at_start(self):
        pass
    def test_two_moving_intr_later(self):
        pass
    def test_two_moving_intr_later_diff_vel(self):
        pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
python | numpy__numpy | numpy/_core/tests/test_function_base.py | {
"start": 4115,
"end": 10531
} | class ____:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1 + 1j, 1000 + 1000j, num=4)
assert_allclose(y, [1 + 1j, 10 + 10j, 100 + 100j, 1000 + 1000j])
y = geomspace(-1 + 1j, -1000 + 1000j, num=4)
assert_allclose(y, [-1 + 1j, -10 + 10j, -100 + 100j, -1000 + 1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0 + 3j, -3 + 0j, 3)
assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j])
y = geomspace(0 + 3j, 3 + 0j, 3)
assert_allclose(y, [0 + 3j, 3 / sqrt(2) + 3j / sqrt(2), 3 + 0j])
y = geomspace(-3 + 0j, 0 - 3j, 3)
assert_allclose(y, [-3 + 0j, -3 / sqrt(2) - 3j / sqrt(2), 0 - 3j])
y = geomspace(0 + 3j, -3 + 0j, 3)
assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j])
y = geomspace(-2 - 3j, 5 + 7j, 7)
assert_allclose(y, [-2 - 3j, -0.29058977 - 4.15771027j,
2.08885354 - 4.34146838j, 4.58345529 - 3.16355218j,
6.41401745 - 0.55233457j, 6.75707386 + 3.11795092j,
5 + 7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_complex_shortest_path(self):
# test the shortest logarithmic spiral is used, see gh-25644
x = 1.2 + 3.4j
y = np.exp(1j * (np.pi - .1)) * x
z = np.geomspace(x, y, 5)
expected = np.array([1.2 + 3.4j, -1.47384 + 3.2905616j,
-3.33577588 + 1.36842949j, -3.36011056 - 1.30753855j,
-1.53343861 - 3.26321406j])
np.testing.assert_array_almost_equal(z, expected)
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex128'))
def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_start_stop_array(self):
# Try to use all special cases.
start = array([1.e0, 32., 1j, -4j, 1 + 1j, -1])
stop = array([1.e4, 2., 16j, -324j, 10000 + 10000j, 1])
t1 = geomspace(start, stop, 5)
t2 = stack([geomspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = geomspace(start, stop[0], 5)
t4 = stack([geomspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = geomspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
| TestGeomspace |
python | ultrajson__ultrajson | tests/fuzz.py | {
"start": 653,
"end": 1987
} | class ____:
"""A random JSON serialisable object generator."""
def __init__(self, seed=None):
self._randomizer = random.Random(seed)
self._shrink = 1
def key(self):
key_types = [self.int, self.float, self.string, self.null, self.bool]
return self._randomizer.choice(key_types)()
def item(self):
if self._randomizer.random() > 0.8:
return self.key()
return self._randomizer.choice([self.list, self.dict])()
def int(self):
return int(self.float())
def float(self):
sign = self._randomizer.choice([-1, 1, 0])
return sign * math.exp(self._randomizer.uniform(-40, 40))
def string(self):
characters = ["\x00", "\t", "a", "\U0001f680", "<></>", "\u1234"]
return self._randomizer.choice(characters) * self.length()
def bool(self):
return self._randomizer.random() < 0.5
def null(self):
return None
def list(self):
return [self.item() for i in range(self.length())]
def dict(self):
return {self.key(): self.item() for i in range(self.length())}
def length(self):
self._shrink *= 0.99
return int(math.exp(self._randomizer.uniform(-0.5, 5)) * self._shrink)
def random_object(seed=None):
return FuzzGenerator(seed).item()
| FuzzGenerator |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 113533,
"end": 141610
} | class ____(CType):
# return_type CType
# args [CFuncTypeArg]
# has_varargs boolean
# exception_value CFuncType.ExceptionValue or Node (for except+)
# exception_check boolean True if PyErr_Occurred check needed
# calling_convention string Function calling convention
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# templates [string] or None
# cached_specialized_types [CFuncType] cached specialized versions of the CFuncType if defined in a pxd
# from_fused boolean Indicates whether this is a specialized
# C function
# is_strict_signature boolean function refuses to accept coerced arguments
# (used for optimisation overrides)
# is_const_method boolean
# is_static_method boolean
# op_arg_struct CPtrType Pointer to optional argument struct
is_cfunction = 1
cached_specialized_types = None
from_fused = False
is_const_method = False
op_arg_struct = None
subtypes = ['return_type', 'args']
class ExceptionValue:
def __init__(self, python_value, c_repr, type):
self.python_value = python_value
self.c_repr = c_repr
self.type = type
def __eq__(self, other):
if not isinstance(other, CFuncType.ExceptionValue):
return NotImplemented
# only the python_value is used for equality comparison. This allows
# things like "-1 == -1.0" to be treated as the same function signature
return self.python_value == other.python_value
def __str__(self):
# Called for C code generation.
return str(self.c_repr)
def may_be_nan(self):
if not self.type.is_float:
return False
if not isinstance(self.python_value, (int, float)):
# A string representing an unknown C constant that might be NaN.
return True
# a known constant that evaluates to NaN
return self.python_value != self.python_value
def exception_test_code(self, result_cname, code) -> str:
typed_exc_val = self.type.cast_code(str(self))
if self.type.is_ctuple:
code.globalstate.use_utility_code(UtilityCode.load_cached(
"IncludeStringH", "StringTools.c"))
return f"memcmp(&{result_cname}, &{typed_exc_val}, sizeof({result_cname})) == 0"
elif self.may_be_nan():
# for floats, we may need to handle comparison with NaN
code.globalstate.use_utility_code(
UtilityCode.load_cached("FloatExceptionCheck", "Exceptions.c"))
return f"__PYX_CHECK_FLOAT_EXCEPTION({result_cname}, {typed_exc_val})"
else:
return f"{result_cname} == {typed_exc_val}"
def __init__(self, return_type, args, has_varargs = 0,
exception_value = None, exception_check = 0, calling_convention = "",
nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
is_const_method = False, is_static_method=False,
templates = None, is_strict_signature = False):
self.return_type = return_type
self.args = args
self.has_varargs = has_varargs
self.optional_arg_count = optional_arg_count
if (exception_value is not None and exception_check != '+' and
not isinstance(exception_value, self.ExceptionValue)):
# happens within Cython itself when writing custom function types
# for utility code functions.
exception_value = self.ExceptionValue(
exception_value, str(exception_value), return_type)
self.exception_value = exception_value
self.exception_check = exception_check
self.calling_convention = calling_convention
self.nogil = nogil
self.with_gil = with_gil
self.is_overridable = is_overridable
self.is_const_method = is_const_method
self.is_static_method = is_static_method
self.templates = templates
self.is_strict_signature = is_strict_signature
def __repr__(self):
arg_reprs = list(map(repr, self.args))
if self.has_varargs:
arg_reprs.append("...")
if self.exception_value is not None:
except_clause = " %r" % self.exception_value
else:
except_clause = ""
if self.exception_check:
except_clause += "?"
return "<CFuncType %s %s[%s]%s>" % (
repr(self.return_type),
self.calling_convention_prefix(),
",".join(arg_reprs),
except_clause)
def with_with_gil(self, with_gil):
if with_gil == self.with_gil:
return self
else:
return CFuncType(
self.return_type, self.args, self.has_varargs,
self.exception_value, self.exception_check,
self.calling_convention, self.nogil,
with_gil,
self.is_overridable, self.optional_arg_count,
self.is_const_method, self.is_static_method,
self.templates, self.is_strict_signature)
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
return cc + " "
else:
return ""
def as_argument_type(self):
return c_ptr_type(self)
def same_c_signature_as(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False,
exact_semantics=True):
# If 'exact_semantics' is false, allow any equivalent C signatures
# if the Cython semantics are compatible, i.e. the same or wider for 'other_type'.
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if self.is_overridable != other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, nargs):
if not self.args[i].type.same_as(other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if as_pxd_definition:
# A narrowing of the return type declared in the pxd is allowed.
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
else:
if not self.return_type.same_as(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if exact_semantics:
if self.exception_check != other_type.exception_check:
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
elif not self._is_exception_compatible_with(other_type):
return 0
return 1
def _same_exception_value(self, other_exc_value):
# Use fallback comparison as strings since we usually read exception values as strings.
if self.exception_value == other_exc_value or str(self.exception_value) == str(other_exc_value):
return 1
if self.exception_check != '+':
return 0
if not self.exception_value or not other_exc_value:
return 0
if self.exception_value.type != other_exc_value.type:
return 0
if self.exception_value.entry and other_exc_value.entry:
if self.exception_value.entry.cname != other_exc_value.entry.cname:
return 0
if self.exception_value.name != other_exc_value.name:
return 0
return 1
def compatible_signature_with(self, other_type, as_cmethod = 0):
return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if not self.is_overridable and other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
return 0
if self.optional_arg_count < other_type.optional_arg_count:
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, len(other_type.args)):
if not self.args[i].type.same_as(
other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if self.nogil != other_type.nogil:
return 0
if not self._is_exception_compatible_with(other_type):
return 0
return 1
def _is_exception_compatible_with(self, other_type):
# narrower exception checks are ok, but prevent mismatches
if self.exception_check == '+' and other_type.exception_check != '+':
# must catch C++ exceptions if we raise them
return 0
if not other_type.exception_check or other_type.exception_value is not None:
# There's no problem if this type doesn't emit exceptions but the other type checks
if other_type.exception_check and not (self.exception_check or self.exception_value):
return 1
# if other does not *always* check exceptions, self must comply
if not self._same_exception_value(other_type.exception_value):
return 0
if self.exception_check and self.exception_check != other_type.exception_check:
# a redundant exception check doesn't make functions incompatible, but a missing one does
return 0
return 1
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
for i in range(as_cmethod, nargs):
if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
return 0
else:
self.args[i].needs_type_test = other_type.args[i].needs_type_test \
or not self.args[i].type.same_as(other_type.args[i].type)
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.exception_check and other_type.exception_check:
# a redundant exception check doesn't make functions incompatible, but a missing one does
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
return 1
def same_calling_convention_as(self, other):
## XXX Under discussion ...
## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
## cs1 = self.calling_convention
## cs2 = other.calling_convention
## if (cs1 in callspec_words or
## cs2 in callspec_words):
## return cs1 == cs2
## else:
## return True
sc1 = self.calling_convention == '__stdcall'
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
def same_as_resolved_type(self, other_type, as_cmethod=False):
return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \
and self.nogil == other_type.nogil
def pointer_assignable_from_resolved_type(self, rhs_type):
# Accept compatible exception/nogil declarations for the RHS.
if rhs_type is error_type:
return 1
if not rhs_type.is_cfunction:
return 0
return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \
and not (self.nogil and not rhs_type.nogil)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
with_calling_convention = 1):
arg_decl_list = []
for arg in self.args[:len(self.args)-self.optional_arg_count]:
arg_decl_list.append(
arg.type.declaration_code("", for_display, pyrex = pyrex))
if self.is_overridable:
arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
if self.optional_arg_count:
if self.op_arg_struct:
arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
else:
# op_arg_struct may not be initialized at this point if this class is being used
# to prepare a Python error message or similar. In this case, just omit the args.
assert for_display
if self.has_varargs:
arg_decl_list.append("...")
arg_decl_code = ", ".join(arg_decl_list)
if not arg_decl_code and not pyrex:
arg_decl_code = "void"
trailer = ""
if (pyrex or for_display) and not self.return_type.is_pyobject:
if self.exception_value is not None and self.exception_check:
trailer = " except? %s" % self.exception_value
elif self.exception_value is not None and not self.exception_check:
trailer = " except %s" % self.exception_value
elif self.exception_value is None and not self.exception_check:
trailer = " noexcept"
elif self.exception_check == '+':
trailer = " except +"
elif self.exception_check and for_display:
# not spelled out by default, unless for human eyes
trailer = " except *"
if self.nogil:
trailer += " nogil"
if not with_calling_convention:
cc = ''
else:
cc = self.calling_convention_prefix()
if (not entity_code and cc) or entity_code.startswith("*"):
entity_code = "(%s%s)" % (cc, entity_code)
cc = ""
if self.is_const_method:
trailer += " const"
return self.return_type.declaration_code(
"%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
for_display, dll_linkage, pyrex)
def function_header_code(self, func_name, arg_code):
if self.is_const_method:
trailer = " const"
else:
trailer = ""
return "%s%s(%s)%s" % (self.calling_convention_prefix(),
func_name, arg_code, trailer)
def signature_string(self):
s = self.empty_declaration_code()
return s
def signature_cast_string(self):
s = self.declaration_code("(*)", with_calling_convention=False)
return '(%s)' % s
def specialize(self, values):
result = CFuncType(self.return_type.specialize(values),
[arg.specialize(values) for arg in self.args],
has_varargs = self.has_varargs,
exception_value = self.exception_value,
exception_check = self.exception_check,
calling_convention = self.calling_convention,
nogil = self.nogil,
with_gil = self.with_gil,
is_overridable = self.is_overridable,
optional_arg_count = self.optional_arg_count,
is_const_method = self.is_const_method,
is_static_method = self.is_static_method,
templates = self.templates)
result.from_fused = self.is_fused
return result
def opt_arg_cname(self, arg_name):
return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
# Methods that deal with Fused Types
# All but map_with_specific_entries should be called only on functions
# with fused types (and not on their corresponding specific versions).
def get_all_specialized_permutations(self, fused_types=None):
"""
Permute all the types. For every specific instance of a fused type, we
want all other specific instances of all other fused types.
It returns an iterable of two-tuples of the cname that should prefix
the cname of the function, and a dict mapping any fused types to their
respective specific types.
"""
assert self.is_fused
if fused_types is None:
fused_types = self.get_fused_types()
return get_all_specialized_permutations(fused_types)
def get_all_specialized_function_types(self):
"""
Get all the specific function types of this one.
"""
assert self.is_fused
if self.entry.fused_cfunction:
return [n.type for n in self.entry.fused_cfunction.nodes]
elif self.cached_specialized_types is not None:
return self.cached_specialized_types
result = []
permutations = self.get_all_specialized_permutations()
new_cfunc_entries = []
for cname, fused_to_specific in permutations:
new_func_type = self.entry.type.specialize(fused_to_specific)
if self.optional_arg_count:
# Remember, this method is set by CFuncDeclaratorNode
self.declare_opt_arg_struct(new_func_type, cname)
new_entry = copy.deepcopy(self.entry)
new_func_type.specialize_entry(new_entry, cname)
new_entry.type = new_func_type
new_func_type.entry = new_entry
result.append(new_func_type)
new_cfunc_entries.append(new_entry)
cfunc_entries = self.entry.scope.cfunc_entries
try:
cindex = cfunc_entries.index(self.entry)
except ValueError:
cfunc_entries.extend(new_cfunc_entries)
else:
cfunc_entries[cindex:cindex+1] = new_cfunc_entries
self.cached_specialized_types = result
return result
def get_fused_types(self, result=None, seen=None, subtypes=None, include_function_return_type=False):
"""Return fused types in the order they appear as parameter types"""
return super().get_fused_types(
result, seen,
# for function pointer types, we consider the result type; for plain function
# types we don't (because it must be derivable from the arguments)
subtypes=self.subtypes if include_function_return_type else ['args'])
def specialize_entry(self, entry, cname):
assert not self.is_fused
specialize_entry(entry, cname)
def can_coerce_to_pyobject(self, env):
# duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code
if self.has_varargs or self.optional_arg_count:
return False
if self.to_py_function is not None:
return self.to_py_function
for arg in self.args:
if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env):
return False
if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
# FIXME: it seems we're trying to coerce in more cases than we should
if self.to_py_function is not None:
return self.to_py_function
if not self.can_coerce_to_pyobject(env):
return False
from .UtilityCode import CythonUtilityCode
# include argument names into the c function name to ensure cname is unique
# between functions with identical types but different argument names
from .Symtab import punycodify_name
def arg_name_part(arg):
return "%s%s" % (len(arg.name), punycodify_name(arg.name)) if arg.name else "0"
arg_names = [ arg_name_part(arg) for arg in self.args ]
arg_names = cap_length("_".join(arg_names))
safe_typename = type_identifier(self, pyrex=True)
# Note that the length here is slightly bigger than twice the default cap in
# "cap_length" (since the length is capped in both arg_names and the type_identifier)
# but since this is significantly shorter than compilers should be able to handle,
# that is acceptable.
to_py_function = "__Pyx_CFunc_%s_to_py_%s" % (safe_typename, arg_names)
for arg in self.args:
if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env):
return False
if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env):
return False
def declared_type(ctype):
type_displayname = str(ctype.declaration_code("", for_display=True))
if ctype.is_pyobject:
arg_ctype = type_name = type_displayname
if ctype.is_builtin_type:
arg_ctype = ctype.name
elif not ctype.is_extension_type:
type_name = 'object'
type_displayname = None
else:
type_displayname = repr(type_displayname)
elif ctype is c_bint_type:
type_name = arg_ctype = 'bint'
else:
type_name = arg_ctype = type_displayname
if ctype is c_double_type:
type_displayname = 'float'
else:
type_displayname = repr(type_displayname)
return type_name, arg_ctype, type_displayname
class Arg:
def __init__(self, arg_name, arg_type):
self.name = arg_name
self.type = arg_type
self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type)
if self.return_type.is_void:
except_clause = 'except *'
elif self.return_type.is_pyobject:
except_clause = ''
elif self.exception_value is not None:
except_clause = ('except? %s' if self.exception_check else 'except %s') % self.exception_value
else:
except_clause = 'except *'
context = {
'cname': to_py_function,
'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)],
'return_type': Arg('return', self.return_type),
'except_clause': except_clause,
}
# FIXME: directives come from first defining environment and do not adapt for reuse
env.use_utility_code(CythonUtilityCode.load(
"cfunc.to_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.to_py_function = to_py_function
return True
def specialize_entry(entry, cname):
"""
Specialize an entry of a copied fused function or method
"""
entry.is_fused_specialized = True
entry.name = get_fused_cname(cname, entry.name)
if entry.is_cmethod:
entry.cname = entry.name
if entry.is_inherited:
entry.cname = StringEncoding.EncodedString(
"%s.%s" % (Naming.obj_base_cname, entry.cname))
else:
entry.cname = get_fused_cname(cname, entry.cname)
if entry.func_cname:
entry.func_cname = get_fused_cname(cname, entry.func_cname)
if entry.final_func_cname:
entry.final_func_cname = get_fused_cname(cname, entry.final_func_cname)
def get_fused_cname(fused_cname, orig_cname):
"""
Given the fused cname id and an original cname, return a specialized cname
"""
assert fused_cname and orig_cname
return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix,
fused_cname, orig_cname))
def unique(somelist):
seen = set()
result = []
for obj in somelist:
if obj not in seen:
result.append(obj)
seen.add(obj)
return result
def get_all_specialized_permutations(fused_types):
return _get_all_specialized_permutations(unique(fused_types))
def _get_all_specialized_permutations(fused_types, id="", f2s=()):
fused_type, = fused_types[0].get_fused_types()
result = []
for newid, specific_type in enumerate(fused_type.types):
# f2s = dict(f2s, **{ fused_type: specific_type })
f2s = dict(f2s)
f2s.update({ fused_type: specific_type })
if id:
cname = '%s_%s' % (id, newid)
else:
cname = str(newid)
if len(fused_types) > 1:
result.extend(_get_all_specialized_permutations(
fused_types[1:], cname, f2s))
else:
result.append((cname, f2s))
return result
def specialization_signature_string(fused_compound_type, fused_to_specific):
"""
Return the signature for a specialization of a fused type. e.g.
floating[:] ->
'float' or 'double'
cdef fused ft:
float[:]
double[:]
ft ->
'float[:]' or 'double[:]'
integral func(floating) ->
'int (*func)(float)' or ...
"""
fused_types = fused_compound_type.get_fused_types()
if len(fused_types) == 1:
fused_type = fused_types[0]
else:
fused_type = fused_compound_type
return fused_type.specialize(fused_to_specific).typeof_name()
def get_specialized_types(type):
"""
Return a list of specialized types in their declared order.
"""
assert type.is_fused
if isinstance(type, FusedType):
result = list(type.types)
for specialized_type in result:
specialized_type.specialization_string = specialized_type.typeof_name()
else:
result = []
for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
specialized_type = type.specialize(f2s)
specialized_type.specialization_string = (
specialization_signature_string(type, f2s))
result.append(specialized_type)
return result
| CFuncType |
python | getsentry__sentry | src/sentry/snuba/metrics/datasource.py | {
"start": 19205,
"end": 38219
} | class ____:
"""Fields and values to filter queries when exceeding the Snuba query limit.
Snuba imposes a limit on the number of rows that can be queried and
returned. This limit can be exceeded when grouping metrics by one or more
tags. In this case, we take the first groups returned by Snuba and filter
subsequent queries with this set of tag values.
Fields:
- ``keys``: A tuple containing resolved tag names ("tag[123]") in the order
of the ``groupBy`` clause.
- ``aliased_keys``: A tuple containing the group column name aliases
- ``values``: A list of tuples containing the tag values of the group keys.
The list is in the order returned by Snuba. The tuple elements are ordered
like ``keys``.
- ``conditions``: A list of raw snuba query conditions to filter subsequent
queries by.
"""
keys: tuple[Groupable, ...]
aliased_keys: tuple[str, ...]
values: list[tuple[int, ...]]
conditions: ConditionGroup
def _get_group_limit_filters(
metrics_query: DeprecatingMetricsQuery,
results: list[Mapping[str, int]],
use_case_id: UseCaseID,
) -> GroupLimitFilters | None:
if not metrics_query.groupby or not results:
return None
# Creates a mapping of groupBy fields to their equivalent SnQL
key_to_condition_dict: dict[Groupable, Any] = {}
for metric_groupby_obj in metrics_query.groupby:
key_to_condition_dict[metric_groupby_obj.name] = (
SnubaQueryBuilder.generate_snql_for_action_by_fields(
metric_action_by_field=metric_groupby_obj,
use_case_id=use_case_id,
org_id=metrics_query.org_id,
projects=Project.objects.get_many_from_cache(metrics_query.project_ids),
is_column=True,
)
)
aliased_group_keys = tuple(
metric_groupby_obj.alias
for metric_groupby_obj in metrics_query.groupby
if metric_groupby_obj.alias is not None
)
# Get an ordered list of tuples containing the values of the group keys.
# This needs to be deduplicated since in timeseries queries the same
# grouping key will reappear for every time bucket.
# If there is only one value, then we don't need to preserve the order with tuples
values = list({tuple(row[col] for col in aliased_group_keys): None for row in results})
conditions = []
if len(aliased_group_keys) > 1:
conditions = [
Condition(
Function("tuple", list(key_to_condition_dict.values())),
Op.IN,
Function("tuple", values),
)
]
# In addition to filtering down on the tuple combination of the fields in
# the group by columns, we need a separate condition for each of the columns
# in the group by with their respective values so Clickhouse can filter the
# results down before checking for the group by column combinations.
values_by_column = {
key: list({row[aliased_key] for row in results})
for key, aliased_key in zip(key_to_condition_dict.keys(), aliased_group_keys)
}
conditions += [
Condition(key_to_condition_dict[col], Op.IN, Function("tuple", col_values))
for col, col_values in values_by_column.items()
]
return GroupLimitFilters(
keys=tuple(key_to_condition_dict.keys()),
aliased_keys=aliased_group_keys,
values=values,
conditions=conditions,
)
def _apply_group_limit_filters(query: Query, filters: GroupLimitFilters) -> Query:
where = list(filters.conditions)
for condition in query.where or []:
# If query is grouped by project_id, then we should remove the original
# condition project_id cause it might be more relaxed than the project_id
# condition in the second query. This does not improve performance, but the
# readability of the query.
if not (
isinstance(condition, Condition)
and isinstance(condition.lhs, Column)
and condition.lhs.name == "project_id"
and "project_id" in filters.keys
):
where.append(condition)
# The initial query already selected the "page", so reset the offset
return query.set_where(where).set_offset(0)
def _sort_results_by_group_filters(
results: list[dict[str, Any]], filters: GroupLimitFilters
) -> list[dict[str, Any]]:
# Create a dictionary that has keys representing the ordered by tuples from the
# initial query, so that we are able to order it easily in the next code block
# If for example, we are grouping by (project_id, transaction) -> then this
# logic will output a dictionary that looks something like, where `tags[1]`
# represents transaction
# {
# (3, 2): [{"metric_id": 4, "project_id": 3, "tags[1]": 2, "p50": [11.0]}],
# (3, 3): [{"metric_id": 4, "project_id": 3, "tags[1]": 3, "p50": [5.0]}],
# }
rows_by_group_values: dict[tuple[int, ...], list[dict[str, Any]]] = {}
for row in results:
group_values = tuple(row[col] for col in filters.aliased_keys)
rows_by_group_values.setdefault(group_values, []).append(row)
# Order the results according to the results of the initial query, so that when
# the results dict is passed on to `SnubaResultsConverter`, it comes out ordered
# Ordered conditions might for example look something like this
# {..., ('project_id', 'tags[1]'): [(3, 3), (3, 2)]}, then we end up with
# {
# "totals": {
# "data": [
# {
# "metric_id": 5, "project_id": 3, "tags[1]": 3, "count_unique": 5
# },
# {
# "metric_id": 5, "project_id": 3, "tags[1]": 2, "count_unique": 1
# },
# ]
# }
# }
sorted = []
for group_values in filters.values:
sorted += rows_by_group_values.get(group_values, [])
return sorted
def _prune_extra_groups(results: dict, filters: GroupLimitFilters) -> None:
valid_values = set(filters.values)
for _entity, queries in results.items():
for key, query_results in queries.items():
filtered = []
for row in query_results["data"]:
group_values = tuple(row[col] for col in filters.aliased_keys)
if group_values in valid_values:
filtered.append(row)
queries[key]["data"] = filtered
def get_series(
projects: Sequence[Project],
metrics_query: DeprecatingMetricsQuery,
use_case_id: UseCaseID,
include_meta: bool = False,
tenant_ids: dict[str, Any] | None = None,
) -> dict:
"""Get time series for the given query"""
organization_id = projects[0].organization_id if projects else None
tenant_ids = dict()
if organization_id is not None:
tenant_ids["organization_id"] = organization_id
tenant_ids["use_case_id"] = use_case_id.value
if metrics_query.interval is not None:
interval = metrics_query.interval
else:
interval = metrics_query.granularity.granularity
start, end, _num_intervals = to_intervals(metrics_query.start, metrics_query.end, interval)
metrics_query = replace(metrics_query, start=start, end=end)
assert metrics_query.start is not None
assert metrics_query.end is not None
intervals = list(
get_intervals(
metrics_query.start,
metrics_query.end,
metrics_query.granularity.granularity,
interval=metrics_query.interval,
)
)
results: dict[str, dict[str, Any]] = {}
meta = []
fields_in_entities = {}
if not metrics_query.groupby:
# When there is no groupBy columns specified, we don't want to go through running an
# initial query first to get the groups because there are no groups, and it becomes just
# one group which is basically identical to eliminating the orderBy altogether
metrics_query = replace(metrics_query, orderby=None)
# It is important to understand that str fields in the order by always refer to a simple column, which at the
# time of writing this comment is only the project_id column. Because you can't select with a str directly,
# we need to run some logic to account for that. The idea is that snuba will automatically "select" any field in
# the group by therefore if we want to order by str field "x" we must always group by "x" in order to have it
# injected in the select by Snuba. We decided for this approach because it allows us to avoid writing derived ops
# for fetching simple columns.
#
# Our goal is to treat order by str fields transparently, that means, we treat them as they are not in the order by.
# This means:
# - If we only have str fields in the order by -> we just run the logic as if the order by was empty.
# - If we have a mix of str and MetricField fields in the order by -> we run the order by logic by selecting in the
# first query only the MetricField-based fields, but we keep the group by and order by intact. Because we know
# that the group by must contain all the str fields specified in the order by we know that they will be returned
# by the first query, thus we will have the full result set with the proper ordering.
#
# If we wouldn't run this logic, we will enter all cases in the order by branch which will fail because no
# str-based fields can be injected into the select.
orderby_contains_only_str_fields = True
if metrics_query.orderby is not None:
for orderby in metrics_query.orderby:
if isinstance(orderby.field, MetricField):
orderby_contains_only_str_fields = False
break
if metrics_query.orderby is not None and not orderby_contains_only_str_fields:
# ToDo(ahmed): Now that we have conditional aggregates as select statements, we might be
# able to shave off a query here. we only need the other queries for fields spanning other
# entities otherwise if all the fields belong to one entity then there is no need
# There is a known limitation that since we make two queries, where we use the results of
# the first query to filter down the results of the second query, so if the field used to
# order by has no values for certain transactions for example in the case of the
# performance table, we might end up showing less transactions than there actually are if
# we choose to order by it. We are limited by the rows available for the field used in
# the orderBy.
# Multi-field select with order by functionality. Currently only supports the
# performance table.
original_select = copy(metrics_query.select)
# This logic is in place because we don't want to put the project_id in the select, as it would require
# a DerivedOp, therefore
# Because ondemand queries skip validation this next block will result in no fields in the select
if not metrics_query.skip_orderby_validation:
orderby_fields = []
for select_field in metrics_query.select:
for orderby in metrics_query.orderby:
if select_field == orderby.field:
orderby_fields.append(select_field)
metrics_query = replace(metrics_query, select=orderby_fields)
snuba_queries, _ = SnubaQueryBuilder(
projects, metrics_query, use_case_id
).get_snuba_queries()
if len(snuba_queries) > 1:
# Currently accepting an order by field that spans multiple entities is not
# supported, but it might change in the future. Even then, it might be better
# handled on the snuba side of things
raise InvalidParams(
"Order by queries over multiple entities are not supported in "
"multi-field select with order by clause queries"
)
try:
# This query contains an order by clause, and so we are only interested in the
# "totals" query
initial_snuba_query = next(iter(snuba_queries.values()))["totals"]
request = Request(
dataset=Dataset.Metrics.value,
app_id="default",
query=initial_snuba_query,
tenant_ids=tenant_ids,
)
initial_query_results = raw_snql_query(
request, use_cache=False, referrer="api.metrics.totals.initial_query"
)
initial_query_results_data = initial_query_results["data"]
meta.extend(initial_query_results["meta"])
except StopIteration:
# This can occur when requesting a list of derived metrics that are not have no data
# for the passed projects
initial_query_results_data = []
# If we do not get any results from the first query, then there is no point in making
# the second query
if initial_query_results_data:
# We no longer want the order by in the 2nd query because we already have the order of
# the group by tags from the first query so we basically remove the order by columns,
# and reset the query fields to the original fields because in the second query,
# we want to query for all the metrics in the request api call
metrics_query = replace(metrics_query, select=original_select, orderby=None)
query_builder = SnubaQueryBuilder(projects, metrics_query, use_case_id)
snuba_queries, fields_in_entities = query_builder.get_snuba_queries()
group_limit_filters = _get_group_limit_filters(
metrics_query, initial_query_results_data, use_case_id
)
# This loop has constant time complexity as it will always have a maximum of
# three queries corresponding to the three available entities:
# ["metrics_sets", "metrics_distributions", "metrics_counters"]
for entity, queries in snuba_queries.items():
results.setdefault(entity, {})
for key, snuba_query in queries.items():
if group_limit_filters:
snuba_query = _apply_group_limit_filters(snuba_query, group_limit_filters)
request = Request(
dataset=Dataset.Metrics.value,
app_id="default",
query=snuba_query,
tenant_ids=tenant_ids,
)
snuba_result = raw_snql_query(
request, use_cache=False, referrer=f"api.metrics.{key}.second_query"
)
snuba_result_data = snuba_result["data"]
meta.extend(snuba_result["meta"])
# Since we removed the orderBy from all subsequent queries,
# we need to sort the results manually. This is required for
# the paginator, since it always queries one additional row
# and removes it at the end.
if group_limit_filters:
snuba_result_data = _sort_results_by_group_filters(
snuba_result_data, group_limit_filters
)
results[entity][key] = {"data": snuba_result_data}
else:
snuba_queries, fields_in_entities = SnubaQueryBuilder(
projects, metrics_query, use_case_id
).get_snuba_queries()
group_limit_filters_2: GroupLimitFilters | None = None
for entity, queries in snuba_queries.items():
results.setdefault(entity, {})
for key, snuba_query in queries.items():
if group_limit_filters_2:
snuba_query = _apply_group_limit_filters(snuba_query, group_limit_filters_2)
request = Request(
dataset=Dataset.Metrics.value,
app_id="default",
query=snuba_query,
tenant_ids=tenant_ids,
)
snuba_result = raw_snql_query(
request,
use_cache=False,
referrer=f"api.metrics.{key}",
)
snuba_result_data = snuba_result["data"]
meta.extend(snuba_result["meta"])
snuba_limit = snuba_query.limit.limit if snuba_query.limit else None
if (
not group_limit_filters_2
and snuba_limit
and len(snuba_result_data) == snuba_limit
):
group_limit_filters_2 = _get_group_limit_filters(
metrics_query, snuba_result_data, use_case_id
)
# We're now applying a filter that past queries may not have
# had. To avoid partial results, remove extra groups that
# aren't in the filter retroactively.
if group_limit_filters_2:
_prune_extra_groups(results, group_limit_filters_2)
results[entity][key] = {"data": snuba_result_data}
org_id = projects[0].organization_id
assert projects
converter = SnubaResultConverter(
org_id,
metrics_query,
fields_in_entities,
intervals,
results,
use_case_id,
)
# Translate applies only on ["data"]
result_groups = converter.translate_result_groups()
# It can occur, when we make queries that are not ordered, that we end up with a number of
# groups that doesn't meet the limit of the query for each of the entities, and hence they
# don't go through the pruning logic resulting in a total number of groups that is greater
# than the limit, and hence we need to prune those excess groups
assert metrics_query.limit is not None
if len(result_groups) > metrics_query.limit.limit:
result_groups = result_groups[0 : metrics_query.limit.limit]
groupby_aliases = (
{
metric_groupby_obj.alias: metric_groupby_obj
for metric_groupby_obj in metrics_query.groupby
if metric_groupby_obj.alias is not None
}
if metrics_query.groupby
else {}
)
return {
"start": metrics_query.start,
"end": metrics_query.end,
"intervals": intervals,
"groups": result_groups,
"meta": (
translate_meta_results(
meta=meta,
alias_to_metric_field=converter._alias_to_metric_field,
alias_to_metric_group_by_field=groupby_aliases,
)
if include_meta
else []
),
}
| GroupLimitFilters |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_validation.py | {
"start": 3954,
"end": 4402
} | class ____(BaseActionValidatorHandler):
notify_action_form = IntegrationNotifyServiceForm
def generate_action_form_data(self) -> dict[str, Any]:
return {
"integration": self.validated_data["integration_id"],
}
def update_action_data(self, cleaned_data: dict[str, Any]) -> dict[str, Any]:
return self.validated_data
@action_validator_registry.register(Action.Type.JIRA)
| TicketingActionValidatorHandler |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/aot_autograd_result.py | {
"start": 21469,
"end": 25018
} | class ____(
GenericAOTAutogradResult[
BundledCompiledForward[TOutputCode], BundledCompiledBackward[TOutputCode]
],
Generic[TOutputCode],
):
"""
Generic AOTAutogradResult where we bundle the entire OutputCode directly
(rather than looking it up via FxGraphCache).
This works with any OutputCode type:
- CompiledFxGraph: Traditional inductor compilation
- RegionalOutputCode: Regional inductor compilation with GraphPickler serialization
- Any future OutputCode subclasses
Type parameter:
TOutputCode: The OutputCode subclass (e.g., CompiledFxGraph, RegionalOutputCode)
Usage with CompiledFxGraph:
entry = BundledAOTAutogradResult[CompiledFxGraph](
compiled_fw=BundledCompiledForward(result=CompiledFxGraph(...)),
compiled_bw=BundledCompiledBackward(
result=CompiledFxGraph(...),
backward_state_indices=[...],
num_symints_saved_for_bw_=...,
),
...
)
Usage with RegionalOutputCode:
entry = BundledAOTAutogradResult[RegionalOutputCode](
compiled_fw=BundledCompiledForward(result=RegionalOutputCode(gm)),
compiled_bw=BundledCompiledBackward(
result=RegionalOutputCode(gm),
backward_state_indices=[...],
num_symints_saved_for_bw_=...,
),
...
)
"""
def deserialize_bundled_cache_entry(entry: BundledAOTAutogradResult) -> Callable:
from copy import deepcopy
from torch._inductor.cudagraph_utils import BoxedDeviceIndex
from torch._inductor.utils import BoxedBool
# In the precompile use case, guards are already serialized
# by dynamo, so we don't need to add them to the environment
entry.guards_expr = None
# TODO: this isn't exactly right, because cudagraphs needs to be a shared config
# which is set by compile_fx. But in precompile, we never actually call compile_fx
# so we don't have a place to track cudagraphs here.
cudagraphs = BoxedBool(torch._inductor.config.triton.cudagraphs)
boxed_forward_device_index = BoxedDeviceIndex(None)
# We need to make a clean copy of the cache entry
# in case it needs to be serialized again
serializable_copy = deepcopy(entry)
from torch._subclasses import FakeTensorMode
from torch.fx.experimental.symbolic_shapes import ShapeEnv
context = torch._guards.TracingContext.try_get()
if context is None:
# Create a clean environment when running fx graph post compile
# if one is not available
context = torch._guards.TracingContext(FakeTensorMode(shape_env=ShapeEnv()))
with torch._guards.tracing(context):
compiled_fn = entry.wrap_post_compile(
[],
entry.sanitized_aot_config,
{
"cudagraphs": cudagraphs,
"boxed_forward_device_index": boxed_forward_device_index,
},
)
# Ensure the deserialized cache entry is still serializable
compiled_fn = SerializableCompiledFunction(compiled_fn, lambda: serializable_copy)
# TODO: this ignores flat_params, which can exist
# if inline_builtin_nn_modules=False
@simple_wraps(compiled_fn)
def forward(*runtime_args: tuple[Any]):
return compiled_fn(list(runtime_args))
assert hasattr(compiled_fn, "serialize")
forward.serialize = compiled_fn.serialize # type: ignore[attr-defined]
return forward
@dataclass
| BundledAOTAutogradResult |
python | tensorflow__tensorflow | tensorflow/python/data/ops/snapshot_op.py | {
"start": 2279,
"end": 4707
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A dataset that allows saving and re-use of already processed data."""
def __init__(self,
input_dataset,
path,
shard_func,
compression=None,
reader_func=None,
pending_snapshot_expiry_seconds=None,
use_legacy_function=False,
name=None):
if reader_func is None:
reader_func = lambda datasets: datasets.interleave( # pylint:disable=g-long-lambda
lambda x: x,
cycle_length=multiprocessing.cpu_count(),
num_parallel_calls=dataset_ops.AUTOTUNE)
self._input_dataset = input_dataset
self._path = path
self._compression = compression
self._reader_func = structured_function.StructuredFunctionWrapper(
reader_func,
self._transformation_name() + ".reader_func",
# Dataset of datasets of input elements
input_structure=dataset_ops.DatasetSpec(
dataset_ops.DatasetSpec(input_dataset.element_spec)),
use_legacy_function=use_legacy_function)
self._shard_func = structured_function.StructuredFunctionWrapper(
shard_func,
self._transformation_name() + ".shard_func",
dataset=input_dataset,
use_legacy_function=use_legacy_function)
if ((not self._shard_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int32))) and
(not self._shard_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)))):
raise TypeError(f"Invalid `shard_func`. `shard_func` must return "
f"`tf.int64` scalar tensor but its return type is "
f"{self._shard_func.output_structure}.")
self._name = name
variant_tensor = ged_ops.snapshot_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
path,
self._reader_func.function.captured_inputs,
self._shard_func.function.captured_inputs,
compression=compression,
reader_func=self._reader_func.function,
shard_func=self._shard_func.function,
**self._common_args)
super().__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._reader_func, self._shard_func]
def _transformation_name(self):
return "Dataset.snapshot()"
| _SnapshotDataset |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_function.py | {
"start": 791,
"end": 2192
} | class ____(threading.local):
"""A context object holding state about the TPU computation being built."""
def __init__(self):
"""Creates a new TpuContext."""
self._number_of_shards = None
@property
def number_of_shards(self):
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
self._number_of_shards = number_of_shards
# The Tpu context holds the number of shards when a sharded computation is
# being built, or None if no computation is being built.
_current_tpu_context = TpuContext()
@contextlib.contextmanager
def tpu_shard_context(number_of_shards):
"""A context manager setting current number of shards."""
if _current_tpu_context.number_of_shards is not None:
raise NotImplementedError("tpu_shard_context cannot be nested")
try:
_current_tpu_context.set_number_of_shards(number_of_shards)
yield
finally:
_current_tpu_context.set_number_of_shards(None)
def get_tpu_context():
return _current_tpu_context
# Decorator function for tpu computation func that was passed to tpu.rewrite()
# if there is an embedded training loop in this func, trace tools will generate
# step markers for each iteration.
def on_device_training_loop(func):
# Value for this attribute is from xla.DebugOptions.StepMarkerLocation.
setattr(func, "step_marker_location", "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP")
return func
| TpuContext |
python | ray-project__ray | python/ray/train/tests/test_predictor.py | {
"start": 1068,
"end": 1542
} | class ____(DummyPreprocessor):
def _transform_numpy(
self, np_data: Union[np.ndarray, Dict[str, np.ndarray]]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
self.inputs.append(np_data)
assert isinstance(np_data, np.ndarray)
rst = np_data * self.multiplier
self.outputs.append(rst)
return rst
@classmethod
def preferred_batch_format(cls) -> BatchFormat:
return BatchFormat.NUMPY
| DummyWithNumpyPreprocessor |
python | wandb__wandb | wandb/apis/importers/wandb.py | {
"start": 50195,
"end": 54547
} | class ____:
entity: str = ""
project: str = ""
run_id: str = RUN_DUMMY_PLACEHOLDER
id: str = RUN_DUMMY_PLACEHOLDER
display_name: str = RUN_DUMMY_PLACEHOLDER
notes: str = ""
url: str = ""
group: str = ""
created_at: str = "2000-01-01"
user: _DummyUser = field(default_factory=_DummyUser)
tags: list = field(default_factory=list)
summary: dict = field(default_factory=dict)
config: dict = field(default_factory=dict)
def files(self):
return []
def _read_ndjson(fname: str) -> Optional[pl.DataFrame]:
try:
df = pl.read_ndjson(fname)
except FileNotFoundError:
return None
except RuntimeError as e:
# No runs previously checked
if "empty string is not a valid JSON value" in str(e):
return None
if "error parsing ndjson" in str(e):
return None
raise
return df
def _get_run_or_dummy_from_art(art: Artifact, api=None):
run = None
try:
run = art.logged_by()
except ValueError as e:
logger.warning(
f"Can't log artifact because run doesn't exist, {art=}, {run=}, {e=}"
)
if run is not None:
return run
query = gql(
"""
query ArtifactCreatedBy(
$id: ID!
) {
artifact(id: $id) {
createdBy {
... on Run {
name
project {
name
entityName
}
}
}
}
}
"""
)
response = api.client.execute(query, variable_values={"id": art.id})
creator = response.get("artifact", {}).get("createdBy", {})
run = _DummyRun(
entity=art.entity,
project=art.project,
run_id=creator.get("name", RUN_DUMMY_PLACEHOLDER),
id=creator.get("name", RUN_DUMMY_PLACEHOLDER),
)
return run
def _clear_fname(fname: str) -> None:
old_fname = f"{internal.ROOT_DIR}/{fname}"
new_fname = f"{internal.ROOT_DIR}/prev_{fname}"
logger.debug(f"Moving {old_fname=} to {new_fname=}")
try:
shutil.copy2(old_fname, new_fname)
except FileNotFoundError:
# this is just to make a copy of the last iteration, so its ok if the src doesn't exist
pass
with open(fname, "w"):
pass
def _download_art(art: Artifact, root: str) -> Optional[str]:
try:
with patch("click.echo"):
return art.download(root=root, skip_cache=True)
except Exception:
logger.exception(f"Error downloading artifact {art=}")
def _clone_art(art: Artifact, root: Optional[str] = None):
if root is None:
# Currently, we would only ever clone a src artifact to move it to dst.
root = f"{SRC_ART_PATH}/{art.name}"
if (path := _download_art(art, root=root)) is None:
raise ValueError(f"Problem downloading {art=}")
name, _ = art.name.split(":v")
# Hack: skip naming validation check for wandb-* types
new_art = Artifact(name, ART_DUMMY_PLACEHOLDER_TYPE)
new_art._type = art.type
new_art._created_at = art.created_at
new_art._aliases = art.aliases
new_art._description = art.description
with patch("click.echo"):
new_art.add_dir(path)
return new_art
def _create_files_if_not_exists() -> None:
fnames = [
ARTIFACT_ERRORS_FNAME,
ARTIFACT_SUCCESSES_FNAME,
RUN_ERRORS_FNAME,
RUN_SUCCESSES_FNAME,
]
for fname in fnames:
logger.debug(f"Creating {fname=} if not exists")
with open(fname, "a"):
pass
def _merge_dfs(dfs: List[pl.DataFrame]) -> pl.DataFrame:
# Ensure there are DataFrames in the list
if len(dfs) == 0:
return pl.DataFrame()
if len(dfs) == 1:
return dfs[0]
merged_df = dfs[0]
for df in dfs[1:]:
merged_df = merged_df.join(df, how="outer", on=["_step"])
col_pairs = [
(c, f"{c}_right")
for c in merged_df.columns
if f"{c}_right" in merged_df.columns
]
for col, right in col_pairs:
new_col = merged_df[col].fill_null(merged_df[right])
merged_df = merged_df.with_columns(new_col).drop(right)
return merged_df
| _DummyRun |
python | python-openxml__python-docx | src/docx/parts/hdrftr.py | {
"start": 1008,
"end": 1709
} | class ____(StoryPart):
"""Definition of a section header."""
@classmethod
def new(cls, package: Package):
"""Return newly created header part."""
partname = package.next_partname("/word/header%d.xml")
content_type = CT.WML_HEADER
element = parse_xml(cls._default_header_xml())
return cls(partname, content_type, element, package)
@classmethod
def _default_header_xml(cls):
"""Return bytes containing XML for a default header part."""
path = os.path.join(os.path.split(__file__)[0], "..", "templates", "default-header.xml")
with open(path, "rb") as f:
xml_bytes = f.read()
return xml_bytes
| HeaderPart |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/manifest_resolver.py | {
"start": 143,
"end": 6639
} | class ____:
"""
An incoming manifest can contain references to values previously defined.
This parser will dereference these values to produce a complete ConnectionDefinition.
References can be defined using a #/<arg> string.
```
key: 1234
reference: "#/key"
```
will produce the following definition:
```
key: 1234
reference: 1234
```
This also works with objects:
```
key_value_pairs:
k1: v1
k2: v2
same_key_value_pairs: "#/key_value_pairs"
```
will produce the following definition:
```
key_value_pairs:
k1: v1
k2: v2
same_key_value_pairs:
k1: v1
k2: v2
```
The $ref keyword can be used to refer to an object and enhance it with addition key-value pairs
```
key_value_pairs:
k1: v1
k2: v2
same_key_value_pairs:
$ref: "#/key_value_pairs"
k3: v3
```
will produce the following definition:
```
key_value_pairs:
k1: v1
k2: v2
same_key_value_pairs:
k1: v1
k2: v2
k3: v3
```
References can also point to nested values.
Nested references are ambiguous because one could define a key containing with `.`
in this example, we want to refer to the limit key in the dict object:
```
dict:
limit: 50
limit_ref: "#/dict/limit"
```
will produce the following definition:
```
dict
limit: 50
limit-ref: 50
```
whereas here we want to access the `nested/path` value.
```
nested:
path: "first one"
nested/path: "uh oh"
value: "#/nested/path
```
will produce the following definition:
```
nested:
path: "first one"
nested/path: "uh oh"
value: "uh oh"
```
to resolve the ambiguity, we try looking for the reference key at the top level, and then traverse the structs downward
until we find a key with the given path, or until there is nothing to traverse.
"""
def preprocess_manifest(self, manifest: Mapping[str, Any]) -> Mapping[str, Any]:
"""
:param manifest: incoming manifest that could have references to previously defined components
:return:
"""
return self._evaluate_node(manifest, manifest, set()) # type: ignore[no-any-return]
def _evaluate_node(self, node: Any, manifest: Mapping[str, Any], visited: Set[Any]) -> Any: # noqa: ANN401
if isinstance(node, dict):
evaluated_dict = {k: self._evaluate_node(v, manifest, visited) for k, v in node.items() if not self._is_ref_key(k)}
if REF_TAG in node:
# The node includes a $ref key, so we splat the referenced value(s) into the evaluated dict
evaluated_ref = self._evaluate_node(node[REF_TAG], manifest, visited)
if not isinstance(evaluated_ref, dict):
return evaluated_ref
else:
# The values defined on the component take precedence over the reference values
return evaluated_ref | evaluated_dict
else:
return evaluated_dict
elif isinstance(node, list):
return [self._evaluate_node(v, manifest, visited) for v in node]
elif self._is_ref(node):
if node in visited:
raise ValueError(node)
visited.add(node)
ret = self._evaluate_node(self._lookup_ref_value(node, manifest), manifest, visited)
visited.remove(node)
return ret
else:
return node
def _lookup_ref_value(self, ref: str, manifest: Mapping[str, Any]) -> Any: # noqa: ANN401
ref_match = re.match(r"#/(.*)", ref)
if not ref_match:
raise ValueError(f"Invalid reference format {ref}")
try:
path = ref_match.groups()[0]
return self._read_ref_value(path, manifest)
except (AttributeError, KeyError, IndexError):
raise ValueError(f"{path}, {ref}")
@staticmethod
def _is_ref(node: Any) -> bool: # noqa: ANN401
return isinstance(node, str) and node.startswith("#/")
@staticmethod
def _is_ref_key(key: str) -> bool:
return bool(key == REF_TAG)
@staticmethod
def _read_ref_value(ref: str, manifest_node: Mapping[str, Any]) -> Any: # noqa: ANN401
"""
Read the value at the referenced location of the manifest.
References are ambiguous because one could define a key containing `/`
In this example, we want to refer to the `limit` key in the `dict` object:
dict:
limit: 50
limit_ref: "#/dict/limit"
Whereas here we want to access the `nested/path` value.
nested:
path: "first one"
nested/path: "uh oh"
value: "#/nested/path"
To resolve the ambiguity, we try looking for the reference key at the top level, and then traverse the structs downward
until we find a key with the given path, or until there is nothing to traverse.
Consider the path foo/bar/baz. To resolve the ambiguity, we first try 'foo/bar/baz' in its entirety as a top-level key. If this
fails, we try 'foo' as the top-level key, and if this succeeds, pass 'bar/baz' on as the key to be tried at the next level.
"""
while ref:
try:
return manifest_node[ref]
except (KeyError, TypeError):
head, ref = _parse_path(ref)
manifest_node = manifest_node[head] # type: ignore # Couldn't figure out how to fix this since manifest_node can get reassigned into other types like lists
return manifest_node
def _parse_path(ref: str) -> Tuple[Union[str, int], str]:
"""
Return the next path component, together with the rest of the path.
A path component may be a string key, or an int index.
>>> _parse_path("foo/bar")
"foo", "bar"
>>> _parse_path("foo/7/8/bar")
"foo", "7/8/bar"
>>> _parse_path("7/8/bar")
7, "8/bar"
>>> _parse_path("8/bar")
8, "bar"
>>> _parse_path("8foo/bar")
"8foo", "bar"
"""
match = re.match(r"([^/]*)/?(.*)", ref)
if match:
first, rest = match.groups()
try:
return int(first), rest
except ValueError:
return first, rest
else:
raise ValueError(f"Invalid path {ref} specified")
| ManifestReferenceResolver |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/gcs_upload.py | {
"start": 1453,
"end": 1600
} | class ____:
metadata_uploaded: bool
metadata_file_path: str
uploaded_files: List[UploadedFile]
@dataclass(frozen=True)
| MetadataUploadInfo |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/remote_explorer.py | {
"start": 1907,
"end": 2062
} | class ____:
CopyPaste = "remote_copy_paste_section"
Extras = "remote_extras_section"
New = "remote_new_section"
| RemoteExplorerContextMenuSections |
python | apache__airflow | airflow-core/tests/unit/utils/test_sqlalchemy.py | {
"start": 7994,
"end": 13750
} | class ____:
@pytest.mark.parametrize(
("input", "expected"),
[
("anything", "anything"),
(
{"pod_override": TEST_POD},
{
"pod_override": {
"__var": {"spec": {"containers": [{"name": "base"}]}},
"__type": DagAttributeTypes.POD,
}
},
),
],
)
def test_bind_processor(self, input, expected, mocker):
"""
The returned bind processor should pickle the object as is, unless it is a dictionary with
a pod_override node, in which case it should run it through BaseSerialization.
"""
config_type = ExecutorConfigType()
mock_dialect = mocker.MagicMock()
mock_dialect.dbapi = None
process = config_type.bind_processor(mock_dialect)
assert pickle.loads(process(input)) == expected
assert pickle.loads(process(input)) == expected, "should not mutate variable"
@pytest.mark.parametrize(
"input",
[
pytest.param(
pickle.dumps("anything"),
id="anything",
),
pytest.param(
pickle.dumps({"pod_override": BaseSerialization.serialize(TEST_POD)}),
id="serialized_pod",
),
pytest.param(
pickle.dumps({"pod_override": TEST_POD}),
id="old_pickled_raw_pod",
),
pytest.param(
pickle.dumps({"pod_override": {"name": "hi"}}),
id="arbitrary_dict",
),
],
)
def test_result_processor(self, input, mocker):
"""
The returned bind processor should pickle the object as is, unless it is a dictionary with
a pod_override node whose value was serialized with BaseSerialization.
"""
config_type = ExecutorConfigType()
mock_dialect = mocker.MagicMock()
mock_dialect.dbapi = None
process = config_type.result_processor(mock_dialect, None)
result = process(input)
expected = pickle.loads(input)
pod_override = isinstance(expected, dict) and expected.get("pod_override")
if pod_override and isinstance(pod_override, dict) and pod_override.get(Encoding.TYPE):
# We should only deserialize a pod_override with BaseSerialization if
# it was serialized with BaseSerialization (which is the behavior added in #24356
expected["pod_override"] = BaseSerialization.deserialize(expected["pod_override"])
assert result == expected
def test_compare_values(self):
"""
When comparison raises AttributeError, return False.
This can happen when executor config contains kubernetes objects pickled
under older kubernetes library version.
"""
class MockAttrError: # noqa: PLW1641
def __eq__(self, other):
raise AttributeError("hello")
a = MockAttrError()
with pytest.raises(AttributeError):
# just verify for ourselves that comparing directly will throw AttributeError
assert a == a
instance = ExecutorConfigType()
assert instance.compare_values(a, a) is False
assert instance.compare_values("a", "a") is True
def test_result_processor_bad_pickled_obj(self, mocker):
"""
If unpickled obj is missing attrs that curr lib expects
"""
test_container = k8s.V1Container(name="base")
test_pod = k8s.V1Pod(spec=k8s.V1PodSpec(containers=[test_container]))
copy_of_test_pod = deepcopy(test_pod)
# curr api expects attr `tty`
assert "tty" in test_container.openapi_types
# it lives in protected attr _tty
assert hasattr(test_container, "_tty")
# so, let's remove it before pickling, to simulate what happens in real life
del test_container._tty
# now let's prove that this blows up when calling to_dict
with pytest.raises(AttributeError):
test_pod.to_dict()
# no such problem with the copy
assert copy_of_test_pod.to_dict()
# so we need to roundtrip it through json
fixed_pod = ensure_pod_is_valid_after_unpickling(test_pod)
# and, since the missing attr was None anyway, we actually have the same pod
assert fixed_pod.to_dict() == copy_of_test_pod.to_dict()
# now, let's verify that result processor makes this all work
# first, check that bad pod is still bad
with pytest.raises(AttributeError):
test_pod.to_dict()
# define what will be retrieved from db
input = pickle.dumps({"pod_override": TEST_POD})
# get the result processor method
config_type = ExecutorConfigType()
mock_dialect = mocker.MagicMock()
mock_dialect.dbapi = None
process = config_type.result_processor(mock_dialect, None)
# apply the result processor
result = process(input)
# show that the pickled (bad) pod is now a good pod, and same as the copy made
# before making it bad
assert result["pod_override"].to_dict() == copy_of_test_pod.to_dict()
@pytest.mark.parametrize(
("mock_version", "expected_result"),
[
("1.0.0", True), # Test 1: v1 identified as v1
("2.3.4", False), # Test 2: v2 not identified as v1
],
)
def test_is_sqlalchemy_v1(mock_version, expected_result, mocker):
mock_metadata = mocker.patch("airflow.utils.sqlalchemy.metadata")
mock_metadata.version.return_value = mock_version
assert is_sqlalchemy_v1() == expected_result
| TestExecutorConfigType |
python | ray-project__ray | doc/source/serve/doc_code/batching_guide.py | {
"start": 494,
"end": 1183
} | class ____:
@serve.batch(max_batch_size=8, batch_wait_timeout_s=0.1)
async def __call__(self, multiple_samples: List[int]) -> List[int]:
# Use numpy's vectorized computation to efficiently process a batch.
return np.array(multiple_samples) * 2
handle: DeploymentHandle = serve.run(Model.bind())
responses = [handle.remote(i) for i in range(8)]
assert list(r.result() for r in responses) == [i * 2 for i in range(8)]
# __batch_end__
# __batch_params_update_begin__
from typing import Dict
@serve.deployment(
# These values can be overridden in the Serve config.
user_config={
"max_batch_size": 10,
"batch_wait_timeout_s": 0.5,
}
)
| Model |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/private.py | {
"start": 4514,
"end": 4842
} | class ____(
PrivateViewMixin,
UpdateChangeReasonPostView,
OrganizationView,
AsyncDeleteViewWithMessage,
):
http_method_names = ["post"]
success_message = _("Organization queued for deletion")
def get_success_url(self):
return reverse_lazy("organization_list")
# Owners views
| DeleteOrganization |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/definition/cacheable_assets_definition.py | {
"start": 9074,
"end": 10537
} | class ____(CacheableAssetsDefinition):
"""Wraps an instance of CacheableAssetsDefinition, applying transformed_assets_def to the
generated AssetsDefinition objects. This lets e.g. users define resources on
the cacheable assets at repo creation time which are not actually bound until
the assets themselves are created.
"""
def __init__(
self,
unique_id: str,
wrapped: CacheableAssetsDefinition,
):
super().__init__(unique_id)
self._wrapped = wrapped
def compute_cacheable_data(self) -> Sequence[AssetsDefinitionCacheableData]:
return self._wrapped.compute_cacheable_data()
def build_definitions(
self, data: Sequence[AssetsDefinitionCacheableData]
) -> Sequence[AssetsDefinition]:
return [
self.transformed_assets_def(assets_def)
for assets_def in self._wrapped.build_definitions(data)
]
@abstractmethod
def transformed_assets_def(self, assets_def: AssetsDefinition) -> AssetsDefinition:
"""Implement this method to transform the AssetsDefinition objects
generated by the underlying, wrapped CacheableAssetsDefinition.
"""
raise NotImplementedError()
def _map_to_hashable(mapping: Mapping[Any, Any]) -> bytes:
return json.dumps(
{json.dumps(k, sort_keys=True): v for k, v in mapping.items()},
sort_keys=True,
).encode("utf-8")
| WrappedCacheableAssetsDefinition |
python | scikit-learn__scikit-learn | sklearn/_loss/loss.py | {
"start": 31565,
"end": 34014
} | class ____(BaseLoss):
"""Half Binomial deviance loss with logit link, for binary classification.
This is also know as binary cross entropy, log-loss and logistic loss.
Domain:
y_true in [0, 1], i.e. regression on the unit interval
y_pred in (0, 1), i.e. boundaries excluded
Link:
y_pred = expit(raw_prediction)
For a given sample x_i, half Binomial deviance is defined as the negative
log-likelihood of the Binomial/Bernoulli distribution and can be expressed
as::
loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i
See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,
section 4.4.1 (about logistic regression).
Note that the formulation works for classification, y = {0, 1}, as well as
logistic regression, y = [0, 1].
If you add `constant_to_optimal_zero` to the loss, you get half the
Bernoulli/binomial deviance.
More details: Inserting the predicted probability y_pred = expit(raw_prediction)
in the loss gives the well known::
loss(x_i) = - y_true_i * log(y_pred_i) - (1 - y_true_i) * log(1 - y_pred_i)
"""
def __init__(self, sample_weight=None):
super().__init__(
closs=CyHalfBinomialLoss(),
link=LogitLink(),
n_classes=2,
)
self.interval_y_true = Interval(0, 1, True, True)
def constant_to_optimal_zero(self, y_true, sample_weight=None):
# This is non-zero only if y_true is neither 0 nor 1.
term = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true)
if sample_weight is not None:
term *= sample_weight
return term
def predict_proba(self, raw_prediction):
"""Predict probabilities.
Parameters
----------
raw_prediction : array of shape (n_samples,) or (n_samples, 1)
Raw prediction values (in link space).
Returns
-------
proba : array of shape (n_samples, 2)
Element-wise class probabilities.
"""
# Be graceful to shape (n_samples, 1) -> (n_samples,)
if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
raw_prediction = raw_prediction.squeeze(1)
proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
proba[:, 1] = self.link.inverse(raw_prediction)
proba[:, 0] = 1 - proba[:, 1]
return proba
| HalfBinomialLoss |
python | marshmallow-code__marshmallow | src/marshmallow/constants.py | {
"start": 115,
"end": 415
} | class ____:
def __bool__(self):
return False
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __repr__(self):
return "<marshmallow.missing>"
def __len__(self):
return 0
missing: typing.Final = _Missing()
| _Missing |
python | tiangolo__fastapi | scripts/contributors.py | {
"start": 3648,
"end": 8771
} | class ____(BaseModel):
contributors: Counter[str]
translation_reviewers: Counter[str]
translators: Counter[str]
authors: dict[str, Author]
def get_contributors(pr_nodes: list[PullRequestNode]) -> ContributorsResults:
contributors = Counter[str]()
translation_reviewers = Counter[str]()
translators = Counter[str]()
authors: dict[str, Author] = {}
for pr in pr_nodes:
if pr.author:
authors[pr.author.login] = pr.author
is_lang = False
for label in pr.labels.nodes:
if label.name == "lang-all":
is_lang = True
break
for review in pr.reviews.nodes:
if review.author:
authors[review.author.login] = review.author
if is_lang:
translation_reviewers[review.author.login] += 1
if pr.state == "MERGED" and pr.author:
if is_lang:
translators[pr.author.login] += 1
else:
contributors[pr.author.login] += 1
return ContributorsResults(
contributors=contributors,
translation_reviewers=translation_reviewers,
translators=translators,
authors=authors,
)
def get_users_to_write(
*,
counter: Counter[str],
authors: dict[str, Author],
min_count: int = 2,
) -> dict[str, Any]:
users: dict[str, Any] = {}
for user, count in counter.most_common():
if count >= min_count:
author = authors[user]
users[user] = {
"login": user,
"count": count,
"avatarUrl": author.avatarUrl,
"url": author.url,
}
return users
def update_content(*, content_path: Path, new_content: Any) -> bool:
old_content = content_path.read_text(encoding="utf-8")
new_content = yaml.dump(new_content, sort_keys=False, width=200, allow_unicode=True)
if old_content == new_content:
logging.info(f"The content hasn't changed for {content_path}")
return False
content_path.write_text(new_content, encoding="utf-8")
logging.info(f"Updated {content_path}")
return True
def main() -> None:
logging.basicConfig(level=logging.INFO)
settings = Settings()
logging.info(f"Using config: {settings.model_dump_json()}")
g = Github(settings.github_token.get_secret_value())
repo = g.get_repo(settings.github_repository)
pr_nodes = get_pr_nodes(settings=settings)
contributors_results = get_contributors(pr_nodes=pr_nodes)
authors = contributors_results.authors
top_contributors = get_users_to_write(
counter=contributors_results.contributors,
authors=authors,
)
top_translators = get_users_to_write(
counter=contributors_results.translators,
authors=authors,
)
top_translations_reviewers = get_users_to_write(
counter=contributors_results.translation_reviewers,
authors=authors,
)
# For local development
# contributors_path = Path("../docs/en/data/contributors.yml")
contributors_path = Path("./docs/en/data/contributors.yml")
# translators_path = Path("../docs/en/data/translators.yml")
translators_path = Path("./docs/en/data/translators.yml")
# translation_reviewers_path = Path("../docs/en/data/translation_reviewers.yml")
translation_reviewers_path = Path("./docs/en/data/translation_reviewers.yml")
updated = [
update_content(content_path=contributors_path, new_content=top_contributors),
update_content(content_path=translators_path, new_content=top_translators),
update_content(
content_path=translation_reviewers_path,
new_content=top_translations_reviewers,
),
]
if not any(updated):
logging.info("The data hasn't changed, finishing.")
return
logging.info("Setting up GitHub Actions git user")
subprocess.run(["git", "config", "user.name", "github-actions"], check=True)
subprocess.run(
["git", "config", "user.email", "github-actions@github.com"], check=True
)
branch_name = f"fastapi-people-contributors-{secrets.token_hex(4)}"
logging.info(f"Creating a new branch {branch_name}")
subprocess.run(["git", "checkout", "-b", branch_name], check=True)
logging.info("Adding updated file")
subprocess.run(
[
"git",
"add",
str(contributors_path),
str(translators_path),
str(translation_reviewers_path),
],
check=True,
)
logging.info("Committing updated file")
message = "👥 Update FastAPI People - Contributors and Translators"
subprocess.run(["git", "commit", "-m", message], check=True)
logging.info("Pushing branch")
subprocess.run(["git", "push", "origin", branch_name], check=True)
logging.info("Creating PR")
pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
logging.info(f"Created PR: {pr.number}")
logging.info("Finished")
if __name__ == "__main__":
main()
| ContributorsResults |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 8707,
"end": 8786
} | class ____(Opcode):
_FLAGS = HAS_JUNKNOWN | NO_NEXT
__slots__ = ()
| BREAK_LOOP |
python | huggingface__transformers | tests/models/got_ocr2/test_modeling_got_ocr2.py | {
"start": 5497,
"end": 11092
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_small_model_integration_test_got_ocr_stop_strings(self):
model_id = "stepfun-ai/GOT-OCR-2.0-hf"
model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_ocr/resolve/main/iam_picture.jpeg"
)
inputs = self.processor(image, return_tensors="pt").to(torch_device)
generate_ids = model.generate(
**inputs,
do_sample=False,
num_beams=1,
tokenizer=self.processor.tokenizer,
stop_strings="<|im_end|>",
max_new_tokens=4096,
)
decoded_output = self.processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "industre"
self.assertEqual(decoded_output, expected_output)
@slow
def test_small_model_integration_test_got_ocr_format(self):
model_id = "stepfun-ai/GOT-OCR-2.0-hf"
model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"
)
inputs = self.processor(image, return_tensors="pt", format=True).to(torch_device)
generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
decoded_output = self.processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "\\title{\nR"
self.assertEqual(decoded_output, expected_output)
@slow
def test_small_model_integration_test_got_ocr_fine_grained(self):
model_id = "stepfun-ai/GOT-OCR-2.0-hf"
model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
)
inputs = self.processor(image, return_tensors="pt", color="green").to(torch_device)
generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
decoded_output = self.processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "You should keep in"
self.assertEqual(decoded_output, expected_output)
@slow
def test_small_model_integration_test_got_ocr_crop_to_patches(self):
model_id = "stepfun-ai/GOT-OCR-2.0-hf"
model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/one_column.png"
)
inputs = self.processor(image, return_tensors="pt", crop_to_patches=True).to(torch_device)
generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
decoded_output = self.processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "on developing architectural improvements"
self.assertEqual(decoded_output, expected_output)
@slow
def test_small_model_integration_test_got_ocr_multi_pages(self):
model_id = "stepfun-ai/GOT-OCR-2.0-hf"
model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
image1 = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/one_column.png"
)
image2 = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
)
inputs = self.processor([image1, image2], return_tensors="pt", multi_page=True).to(torch_device)
generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
decoded_output = self.processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "on developing architectural improvements"
self.assertEqual(decoded_output, expected_output)
@slow
def test_small_model_integration_test_got_ocr_batched(self):
model_id = "stepfun-ai/GOT-OCR-2.0-hf"
model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
image1 = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
)
image2 = load_image(
"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"
)
inputs = self.processor([image1, image2], return_tensors="pt").to(torch_device)
generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
decoded_output = self.processor.batch_decode(
generate_ids[:, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = ["Reducing the number", "R&D QUALITY"]
self.assertEqual(decoded_output, expected_output)
| GotOcr2IntegrationTest |
python | getsentry__sentry | tests/sentry/sentry_apps/api/parsers/test_image.py | {
"start": 201,
"end": 1054
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.schema: dict[str, Any] = {
"type": "image",
"url": "https://example.com/image.gif",
"alt": "example video",
}
def test_valid_schema(self) -> None:
validate_component(self.schema)
@invalid_schema
def test_missing_url(self) -> None:
del self.schema["url"]
validate_component(self.schema)
@invalid_schema
def test_invalid_url(self) -> None:
self.schema["url"] = "not-a-url"
validate_component(self.schema)
def test_missing_alt(self) -> None:
del self.schema["alt"]
validate_component(self.schema)
@invalid_schema
def test_invalid_alt_type(self) -> None:
self.schema["alt"] = 1
validate_component(self.schema)
| TestImageSchemaValidation |
python | huggingface__transformers | src/transformers/models/xmod/modeling_xmod.py | {
"start": 15310,
"end": 17065
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = XmodCrossAttention if is_cross_attention else XmodSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = XmodSelfOutput(config)
self.pre_norm = config.pre_norm
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
residual = hidden_states
if self.pre_norm:
hidden_states = self.output.LayerNorm(hidden_states)
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, residual)
if not self.pre_norm:
attention_output = self.output.LayerNorm(attention_output)
return attention_output, attn_weights
# Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate
| XmodAttention |
python | Lightning-AI__lightning | src/lightning/pytorch/callbacks/progress/tqdm_progress.py | {
"start": 1217,
"end": 2175
} | class ____(_tqdm):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Custom tqdm progressbar where we append 0 to floating points/strings to prevent the progress bar from
flickering."""
# this just to make the make docs happy, otherwise it pulls docs which has some issues...
super().__init__(*args, **kwargs)
@staticmethod
def format_num(n: Union[int, float, str]) -> str:
"""Add additional padding to the formatted numbers."""
should_be_padded = isinstance(n, (float, str))
if not isinstance(n, str):
n = _tqdm.format_num(n)
assert isinstance(n, str)
if should_be_padded and "e" not in n:
if "." not in n and len(n) < _PAD_SIZE:
try:
_ = float(n)
except ValueError:
return n
n += "."
n += "0" * (_PAD_SIZE - len(n))
return n
| Tqdm |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 540707,
"end": 541159
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateProjectV2"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_v2")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_v2 = sgqlc.types.Field("ProjectV2", graphql_name="projectV2")
"""The new project."""
| CreateProjectV2Payload |
python | getsentry__sentry | src/sentry/integrations/services/integration/service.py | {
"start": 841,
"end": 10028
} | class ____(RpcService):
key = "integration"
local_mode = SiloMode.CONTROL
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.integrations.services.integration.impl import DatabaseBackedIntegrationService
return DatabaseBackedIntegrationService()
@rpc_method
@abstractmethod
def page_integration_ids(
self,
*,
provider_keys: list[str],
organization_id: int,
args: RpcPaginationArgs,
) -> RpcPaginationResult:
pass
@rpc_method
@abstractmethod
def send_message(
self,
*,
integration_id: int,
organization_id: int,
channel: str,
message: str,
) -> bool:
pass
@rpc_method
@abstractmethod
def get_integrations(
self,
*,
integration_ids: list[int] | None = None,
organization_id: int | None = None,
status: int | None = None,
providers: list[str] | None = None,
org_integration_status: int | None = None,
limit: int | None = None,
organization_integration_id: int | None = None,
) -> list[RpcIntegration]:
"""
Returns all RpcIntegrations matching the provided kwargs.
"""
@rpc_method
@abstractmethod
def get_integration(
self,
*,
integration_id: int | None = None,
provider: str | None = None,
external_id: str | None = None,
organization_id: int | None = None,
organization_integration_id: int | None = None,
status: int | None = None,
) -> RpcIntegration | None:
"""
Returns an RpcIntegration using either the id or a combination of the provider and external_id
"""
@rpc_method
@abstractmethod
def get_organization_integrations(
self,
*,
org_integration_ids: list[int] | None = None,
integration_id: int | None = None,
organization_id: int | None = None,
organization_ids: list[int] | None = None,
status: int | None = None,
providers: list[str] | None = None,
has_grace_period: bool | None = None,
grace_period_expired: bool | None = None,
limit: int | None = None,
name: str | None = None,
) -> list[RpcOrganizationIntegration]:
"""
Returns all RpcOrganizationIntegrations from the matching kwargs.
If providers is set, it will also be filtered by the integration providers set in the list.
If has_grace_period is set, it will filter by whether the grace_period is null or not.
"""
@rpc_method
def get_organization_integration(
self, *, integration_id: int, organization_id: int
) -> RpcOrganizationIntegration | None:
"""
Returns an RpcOrganizationIntegration from the integration and organization ids.
"""
ois = self.get_organization_integrations(
integration_id=integration_id, organization_id=organization_id, limit=1
)
return ois[0] if len(ois) > 0 else None
@rpc_method
@abstractmethod
def start_grace_period_for_provider(
self,
*,
organization_id: int,
provider: str,
grace_period_end: datetime,
status: int | None = ObjectStatus.ACTIVE,
skip_oldest: bool = False,
) -> list[RpcOrganizationIntegration]:
"""
Start grace period for all OrganizationIntegrations of a given provider for an organization
Args:
organization_id (int): The Organization whose OrganizationIntegrations will be grace perioded
provider (str): The provider key - e.g. "github"
grace_period_end (datetime): The grace period end date
status (int, optional): The status of the OrganizationIntegrations. Defaults to ObjectStatus.ACTIVE. Put None to include all statuses.
skip_oldest (bool, optional): Flag for if we want to skip grace period for the oldest OrganizationIntegration per Integration. Defaults to False.
Returns:
list[RpcOrganizationIntegration]: The updated OrganizationIntegrations
"""
@rpc_method
@abstractmethod
def organization_context(
self,
*,
organization_id: int,
integration_id: int | None = None,
provider: str | None = None,
external_id: str | None = None,
) -> RpcOrganizationContext:
"""
Returns a tuple of RpcIntegration and RpcOrganizationIntegration. The integration is selected
by either integration_id, or a combination of provider and external_id.
"""
@rpc_method
@abstractmethod
def organization_contexts(
self,
*,
organization_id: int | None = None,
integration_id: int | None = None,
provider: str | None = None,
external_id: str | None = None,
) -> RpcOrganizationContextList:
"""
Returns a tuple of RpcIntegration and RpcOrganizationIntegrations. The integrations are selected
by either integration_id, or a combination of provider and external_id.
"""
@rpc_method
@abstractmethod
def update_integrations(
self,
*,
integration_ids: list[int],
name: str | None = None,
metadata: dict[str, Any] | None = None,
status: int | None = None,
) -> list[RpcIntegration]:
"""
Returns a list of RpcIntegrations after updating the fields provided.
To set a field as null, use the `set_{FIELD}_null` keyword argument.
"""
@rpc_method
@abstractmethod
def add_organization(self, *, integration_id: int, org_ids: list[int]) -> RpcIntegration | None:
"""
Adds organizations to an existing integration
"""
@rpc_method
@abstractmethod
def update_integration(
self,
*,
integration_id: int,
name: str | None = None,
metadata: dict[str, Any] | None = None,
status: int | None = None,
) -> RpcIntegration | None:
"""
Returns an RpcIntegration after updating the fields provided.
To set a field as null, use the `set_{FIELD}_null` keyword argument.
"""
@rpc_method
@abstractmethod
def update_organization_integrations(
self,
*,
org_integration_ids: list[int],
config: dict[str, Any] | None = None,
status: int | None = None,
grace_period_end: datetime | None = None,
set_grace_period_end_null: bool | None = None,
) -> list[RpcOrganizationIntegration]:
"""
Returns a list of RpcOrganizationIntegrations after updating the fields provided.
To set a field as null, use the `set_{FIELD}_null` keyword argument.
"""
@rpc_method
@abstractmethod
def update_organization_integration(
self,
*,
org_integration_id: int,
config: dict[str, Any] | None = None,
status: int | None = None,
grace_period_end: datetime | None = None,
set_grace_period_end_null: bool | None = None,
) -> RpcOrganizationIntegration | None:
"""
Returns an RpcOrganizationIntegration after updating the fields provided.
To set a field as null, use the `set_{FIELD}_null` keyword argument.
"""
@rpc_method
@abstractmethod
def send_incident_alert_notification(
self,
*,
sentry_app_id: int,
action_id: int,
incident_id: int,
new_status: int,
incident_attachment_json: str,
organization_id: int,
metric_value: float,
notification_uuid: str | None = None,
) -> bool:
pass
@rpc_method
@abstractmethod
def send_msteams_incident_alert_notification(
self, *, integration_id: int, channel: str, attachment: dict[str, Any]
) -> bool:
raise NotImplementedError
@rpc_method
@abstractmethod
def delete_integration(self, *, integration_id: int) -> None:
pass
@rpc_method
@abstractmethod
def get_integration_external_project(
self, *, organization_id: int, integration_id: int, external_id: str
) -> RpcIntegrationExternalProject | None:
pass
@rpc_method
@abstractmethod
def get_integration_external_projects(
self, *, organization_id: int, integration_id: int, external_id: str | None = None
) -> list[RpcIntegrationExternalProject]:
pass
@rpc_method
@abstractmethod
def get_integration_identity_context(
self,
*,
integration_provider: str | None = None,
integration_external_id: str | None = None,
identity_external_id: str | None = None,
identity_provider_external_id: str | None = None,
) -> RpcIntegrationIdentityContext:
pass
@rpc_method
@abstractmethod
def refresh_github_access_token(
self, *, integration_id: int, organization_id: int
) -> RpcIntegration | None:
pass
integration_service = IntegrationService.create_delegation()
| IntegrationService |
python | getsentry__sentry | tests/acceptance/test_performance_landing.py | {
"start": 493,
"end": 2914
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.org, name="Mariachi Band", members=[self.user]
)
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.group = self.create_group(project=self.project)
self.login_as(self.user)
self.path = f"/organizations/{self.org.slug}/performance/"
self.page = BasePage(self.browser)
@patch("django.utils.timezone.now")
def test_with_data(self, mock_now: MagicMock) -> None:
mock_now.return_value = before_now()
event = load_data("transaction", timestamp=before_now(minutes=10))
self.store_event(data=event, project_id=self.project.id)
self.project.update(flags=F("flags").bitor(Project.flags.has_transactions))
with self.feature(FEATURE_NAMES):
self.browser.get(self.path)
self.page.wait_until_loaded()
# This test is flakey in that we sometimes load this page before the event is processed
# depend on pytest-retry to reload the page
self.browser.wait_until_not(
'[data-test-id="grid-editable"] [data-test-id="empty-state"]', timeout=2
)
@patch("django.utils.timezone.now")
def test_with_data_and_new_widget_designs(self, mock_now: MagicMock) -> None:
mock_now.return_value = before_now()
event = load_data("transaction", timestamp=before_now(minutes=10))
self.store_event(data=event, project_id=self.project.id)
self.project.update(flags=F("flags").bitor(Project.flags.has_transactions))
FEATURES = (
"organizations:discover-basic",
"organizations:performance-view",
"organizations:performance-new-widget-designs",
)
with self.feature(FEATURES):
self.browser.get(self.path)
self.page.wait_until_loaded()
# This test is flakey in that we sometimes load this page before the event is processed
# depend on pytest-retry to reload the page
self.browser.wait_until_not(
'[data-test-id="grid-editable"] [data-test-id="empty-state"]', timeout=2
)
| PerformanceLandingTest |
python | nedbat__coveragepy | coverage/cmdline.py | {
"start": 12628,
"end": 21651
} | class ____(CoverageOptionParser):
"""Parse one of the new-style commands for coverage.py."""
def __init__(
self,
action: str,
options: list[optparse.Option],
description: str,
usage: str | None = None,
):
"""Create an OptionParser for a coverage.py command.
`action` is the slug to put into `options.action`.
`options` is a list of Option's for the command.
`description` is the description of the command, for the help text.
`usage` is the usage string to display in help.
"""
if usage:
usage = "%prog " + usage
super().__init__(
usage=usage,
description=description,
)
self.set_defaults(action=action)
self.add_options(options)
self.cmd = action
def __eq__(self, other: str) -> bool: # type: ignore[override]
# A convenience equality, so that I can put strings in unit test
# results, and they will compare equal to objects.
return other == f"<CmdOptionParser:{self.cmd}>"
__hash__ = None # type: ignore[assignment]
def get_prog_name(self) -> str:
"""Override of an undocumented function in optparse.OptionParser."""
program_name = super().get_prog_name()
# Include the sub-command for this parser as part of the command.
return f"{program_name} {self.cmd}"
# In lists of Opts, keep them alphabetized by the option names as they appear
# on the command line, since these lists determine the order of the options in
# the help output.
#
# In COMMANDS, keep the keys (command names) alphabetized.
GLOBAL_ARGS = [
Opts.debug,
Opts.help,
Opts.rcfile,
]
COMMANDS = {
"annotate": CmdOptionParser(
"annotate",
[
Opts.directory,
Opts.datafle_input,
Opts.ignore_errors,
Opts.include,
Opts.omit,
]
+ GLOBAL_ARGS,
usage="[options] [modules]",
description=oneline(
"""
Make annotated copies of the given files, marking statements that are executed
with > and statements that are missed with !.
"""
),
),
"combine": CmdOptionParser(
"combine",
[
Opts.append,
Opts.datafile,
Opts.keep,
Opts.quiet,
]
+ GLOBAL_ARGS,
usage="[options] <path1> <path2> ... <pathN>",
description=oneline(
"""
Combine data from multiple coverage files.
The combined results are written to a single
file representing the union of the data. The positional
arguments are data files or directories containing data files.
If no paths are provided, data files in the default data file's
directory are combined.
"""
),
),
"debug": CmdOptionParser(
"debug",
GLOBAL_ARGS,
usage="<topic>",
description=oneline(
"""
Display information about the internals of coverage.py,
for diagnosing problems.
Topics are:
'data' to show a summary of the collected data;
'sys' to show installation information;
'config' to show the configuration;
'premain' to show what is calling coverage;
'pybehave' to show internal flags describing Python behavior;
'sqlite' to show SQLite compilation options.
"""
),
),
"erase": CmdOptionParser(
"erase",
[
Opts.datafile,
]
+ GLOBAL_ARGS,
description="Erase previously collected coverage data.",
),
"help": CmdOptionParser(
"help",
GLOBAL_ARGS,
usage="[command]",
description="Describe how to use coverage.py",
),
"html": CmdOptionParser(
"html",
[
Opts.contexts,
Opts.directory,
Opts.datafle_input,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.precision,
Opts.quiet,
Opts.show_contexts,
Opts.skip_covered,
Opts.no_skip_covered,
Opts.skip_empty,
Opts.title,
]
+ GLOBAL_ARGS,
usage="[options] [modules]",
description=oneline(
"""
Create an HTML report of the coverage of the files.
Each file gets its own page, with the source decorated to show
executed, excluded, and missed lines.
"""
),
),
"json": CmdOptionParser(
"json",
[
Opts.contexts,
Opts.datafle_input,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.output_json,
Opts.json_pretty_print,
Opts.quiet,
Opts.show_contexts,
]
+ GLOBAL_ARGS,
usage="[options] [modules]",
description="Generate a JSON report of coverage results.",
),
"lcov": CmdOptionParser(
"lcov",
[
Opts.datafle_input,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.output_lcov,
Opts.omit,
Opts.quiet,
]
+ GLOBAL_ARGS,
usage="[options] [modules]",
description="Generate an LCOV report of coverage results.",
),
"report": CmdOptionParser(
"report",
[
Opts.contexts,
Opts.datafle_input,
Opts.fail_under,
Opts.format,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.precision,
Opts.sort,
Opts.show_missing,
Opts.skip_covered,
Opts.no_skip_covered,
Opts.skip_empty,
]
+ GLOBAL_ARGS,
usage="[options] [modules]",
description="Report coverage statistics on modules.",
),
"run": CmdOptionParser(
"run",
[
Opts.append,
Opts.branch,
Opts.concurrency,
Opts.context,
Opts.datafile_output,
Opts.include,
Opts.module,
Opts.omit,
Opts.pylib,
Opts.parallel_mode,
Opts.save_signal,
Opts.source,
Opts.timid,
]
+ GLOBAL_ARGS,
usage="[options] <pyfile> [program options]",
description="Run a Python program, measuring code execution.",
),
"xml": CmdOptionParser(
"xml",
[
Opts.datafle_input,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.output_xml,
Opts.quiet,
Opts.skip_empty,
]
+ GLOBAL_ARGS,
usage="[options] [modules]",
description="Generate an XML report of coverage results.",
),
}
def show_help(
error: str | None = None,
topic: str | None = None,
parser: optparse.OptionParser | None = None,
) -> None:
"""Display an error message, or the named topic."""
assert error or topic or parser
program_path = sys.argv[0]
if program_path.endswith(os.path.sep + "__main__.py"):
# The path is the main module of a package; get that path instead.
program_path = os.path.dirname(program_path)
program_name = os.path.basename(program_path)
if env.WINDOWS:
# entry_points={"console_scripts":...} on Windows makes files
# called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
# invoke coverage-script.py, coverage3-script.py, and
# coverage-3.5-script.py. argv[0] is the .py file, but we want to
# get back to the original form.
auto_suffix = "-script.py"
if program_name.endswith(auto_suffix):
program_name = program_name[: -len(auto_suffix)]
help_params = dict(coverage.__dict__)
help_params["__url__"] = __url__
help_params["program_name"] = program_name
if CTRACER_FILE:
help_params["extension_modifier"] = "with C extension"
else:
help_params["extension_modifier"] = "without C extension"
if error:
print(error, file=sys.stderr)
print(f"Use '{program_name} help' for help.", file=sys.stderr)
elif parser:
print(parser.format_help().strip())
print()
else:
assert topic is not None
help_msg = textwrap.dedent(HELP_TOPICS.get(topic, "")).strip()
if help_msg:
print(help_msg.format(**help_params))
else:
print(f"Don't know topic {topic!r}")
print("Full documentation is at {__url__}".format(**help_params))
OK, ERR, FAIL_UNDER = 0, 1, 2
| CmdOptionParser |
python | pytorch__pytorch | torch/_inductor/cpu_vec_isa.py | {
"start": 6664,
"end": 8469
} | class ____(VecISA):
_bit_width = 512
_macro = ["CPU_CAPABILITY_AVX512"]
_arch_flags = (
"-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma"
if not _IS_WINDOWS
else "/arch:AVX512"
) # TODO: use cflags
_dtype_nelements = {torch.float: 16, torch.bfloat16: 32, torch.float16: 32}
_is_avx512_bf16_supported = False
def __str__(self) -> str:
return "avx512"
__hash__: Callable[[VecISA], Any] = VecISA.__hash__ # type: ignore[assignment]
_avx512_bf16_code = """
#include <cstdint>
#include <immintrin.h>
extern "C" __m512bh __avx512_bf16_chk_kernel(__m512 a, __m512 b) {
return _mm512_cvtne2ps_pbh(a, b);
}
"""
@functools.cache # noqa: B019
# pyrefly: ignore [bad-override]
def __bool__(self) -> bool:
if super().__bool__():
if config.is_fbcode():
return False
# check avx512_bf16
if torch.cpu._is_avx512_bf16_supported() and not _IS_WINDOWS:
# save _arch_flags
base_flags = self._arch_flags
# temporarily change _arch_flags for avx512_bf16 check_build
self._arch_flags += " -mavx512bf16"
if self.check_build(VecAMX._avx512_bf16_code):
self._is_avx512_bf16_supported = True
# restore _arch_flags
self._arch_flags = base_flags
return True
return False
@functools.lru_cache(None) # noqa: B019
def is_avx512_bf16_supported(self) -> bool:
return self._is_avx512_bf16_supported
def build_arch_flags(self) -> str:
if self._is_avx512_bf16_supported:
return self._arch_flags + " -mavx512bf16"
else:
return self._arch_flags
@dataclasses.dataclass
| VecAVX512 |
python | huggingface__transformers | src/transformers/models/grounding_dino/configuration_grounding_dino.py | {
"start": 906,
"end": 14778
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GroundingDinoModel`]. It is used to instantiate a
Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Grounding DINO
[IDEA-Research/grounding-dino-tiny](https://huggingface.co/IDEA-Research/grounding-dino-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`PreTrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`):
The config object or dictionary of the text backbone.
num_queries (`int`, *optional*, defaults to 900):
Number of object queries, i.e. detection slots. This is the maximal number of objects
[`GroundingDinoModel`] can detect in a single image.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
d_model (`int`, *optional*, defaults to 256):
Dimension of the layers.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
position_embedding_type (`str`, *optional*, defaults to `"sine"`):
Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
num_feature_levels (`int`, *optional*, defaults to 4):
The number of input feature levels.
encoder_n_points (`int`, *optional*, defaults to 4):
The number of sampled keys in each feature level for each attention head in the encoder.
decoder_n_points (`int`, *optional*, defaults to 4):
The number of sampled keys in each feature level for each attention head in the decoder.
two_stage (`bool`, *optional*, defaults to `True`):
Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
Grounding DINO, which are further fed into the decoder for iterative bounding box refinement.
class_cost (`float`, *optional*, defaults to 1.0):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5.0):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2.0):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
bbox_loss_coefficient (`float`, *optional*, defaults to 5.0):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2.0):
Relative weight of the generalized IoU loss in the object detection loss.
focal_alpha (`float`, *optional*, defaults to 0.25):
Alpha parameter in the focal loss.
disable_custom_kernels (`bool`, *optional*, defaults to `False`):
Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
kernels are not supported by PyTorch ONNX export.
max_text_len (`int`, *optional*, defaults to 256):
The maximum length of the text input.
text_enhancer_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the text enhancer.
fusion_droppath (`float`, *optional*, defaults to 0.1):
The droppath ratio for the fusion module.
fusion_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the fusion module.
embedding_init_target (`bool`, *optional*, defaults to `True`):
Whether to initialize the target with Embedding weights.
query_dim (`int`, *optional*, defaults to 4):
The dimension of the query vector.
decoder_bbox_embed_share (`bool`, *optional*, defaults to `True`):
Whether to share the bbox regression head for all decoder layers.
two_stage_bbox_embed_share (`bool`, *optional*, defaults to `False`):
Whether to share the bbox embedding between the two-stage bbox generator and the region proposal
generation.
positional_embedding_temperature (`float`, *optional*, defaults to 20):
The temperature for Sine Positional Embedding that is used together with vision backbone.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
Examples:
```python
>>> from transformers import GroundingDinoConfig, GroundingDinoModel
>>> # Initializing a Grounding DINO IDEA-Research/grounding-dino-tiny style configuration
>>> configuration = GroundingDinoConfig()
>>> # Initializing a model (with random weights) from the IDEA-Research/grounding-dino-tiny style configuration
>>> model = GroundingDinoModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "grounding-dino"
sub_configs = {"backbone_config": AutoConfig, "text_config": AutoConfig}
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
backbone_config=None,
backbone=None,
use_pretrained_backbone=False,
use_timm_backbone=False,
backbone_kwargs=None,
text_config=None,
num_queries=900,
encoder_layers=6,
encoder_ffn_dim=2048,
encoder_attention_heads=8,
decoder_layers=6,
decoder_ffn_dim=2048,
decoder_attention_heads=8,
is_encoder_decoder=True,
activation_function="relu",
d_model=256,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
auxiliary_loss=False,
position_embedding_type="sine",
num_feature_levels=4,
encoder_n_points=4,
decoder_n_points=4,
two_stage=True,
class_cost=1.0,
bbox_cost=5.0,
giou_cost=2.0,
bbox_loss_coefficient=5.0,
giou_loss_coefficient=2.0,
focal_alpha=0.25,
disable_custom_kernels=False,
# other parameters
max_text_len=256,
text_enhancer_dropout=0.0,
fusion_droppath=0.1,
fusion_dropout=0.0,
embedding_init_target=True,
query_dim=4,
decoder_bbox_embed_share=True,
two_stage_bbox_embed_share=False,
positional_embedding_temperature=20,
init_std=0.02,
layer_norm_eps=1e-5,
**kwargs,
):
if backbone_config is None and backbone is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
backbone_config = CONFIG_MAPPING["swin"](
window_size=7,
image_size=224,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
out_indices=[2, 3, 4],
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.pop("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
if text_config is None:
text_config = {}
logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).")
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
# deformable attributes
self.num_feature_levels = num_feature_levels
self.encoder_n_points = encoder_n_points
self.decoder_n_points = decoder_n_points
self.two_stage = two_stage
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.focal_alpha = focal_alpha
self.disable_custom_kernels = disable_custom_kernels
# Text backbone
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "bert")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["bert"]()
self.text_config = text_config
self.max_text_len = max_text_len
# Text Enhancer
self.text_enhancer_dropout = text_enhancer_dropout
# Fusion
self.fusion_droppath = fusion_droppath
self.fusion_dropout = fusion_dropout
# Others
self.embedding_init_target = embedding_init_target
self.query_dim = query_dim
self.decoder_bbox_embed_share = decoder_bbox_embed_share
self.two_stage_bbox_embed_share = two_stage_bbox_embed_share
if two_stage_bbox_embed_share and not decoder_bbox_embed_share:
raise ValueError("If two_stage_bbox_embed_share is True, decoder_bbox_embed_share must be True.")
self.positional_embedding_temperature = positional_embedding_temperature
self.init_std = init_std
self.layer_norm_eps = layer_norm_eps
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
self.tie_encoder_decoder = True
self.tie_encoder_decoder = True
__all__ = ["GroundingDinoConfig"]
| GroundingDinoConfig |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_libsparse.py | {
"start": 11877,
"end": 14671
} | class ____:
def test_block_internal(self):
idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block")
assert isinstance(idx, BlockIndex)
assert idx.npoints == 2
tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32))
tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32))
idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block")
assert isinstance(idx, BlockIndex)
assert idx.npoints == 0
tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32))
tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32))
idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block")
assert isinstance(idx, BlockIndex)
assert idx.npoints == 4
tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32))
tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32))
idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block")
assert isinstance(idx, BlockIndex)
assert idx.npoints == 3
tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32))
tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32))
@pytest.mark.parametrize("i", [5, 10, 100, 101])
def test_make_block_boundary(self, i):
idx = make_sparse_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block")
exp = np.arange(0, i, 2, dtype=np.int32)
tm.assert_numpy_array_equal(idx.blocs, exp)
tm.assert_numpy_array_equal(idx.blengths, np.ones(len(exp), dtype=np.int32))
def test_equals(self):
index = BlockIndex(10, [0, 4], [2, 5])
assert index.equals(index)
assert not index.equals(BlockIndex(10, [0, 4], [2, 6]))
def test_check_integrity(self):
locs = []
lengths = []
# 0-length OK
BlockIndex(0, locs, lengths)
# also OK even though empty
BlockIndex(1, locs, lengths)
msg = "Block 0 extends beyond end"
with pytest.raises(ValueError, match=msg):
BlockIndex(10, [5], [10])
msg = "Block 0 overlaps"
with pytest.raises(ValueError, match=msg):
BlockIndex(10, [2, 5], [5, 3])
def test_to_int_index(self):
locs = [0, 10]
lengths = [4, 6]
exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]
block = BlockIndex(20, locs, lengths)
dense = block.to_int_index()
tm.assert_numpy_array_equal(dense.indices, np.array(exp_inds, dtype=np.int32))
def test_to_block_index(self):
index = BlockIndex(10, [0, 5], [4, 5])
assert index.to_block_index() is index
| TestBlockIndex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.