language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tracers/stdout.py
|
{
"start": 1250,
"end": 6425
}
|
class ____(BaseTracer):
"""Tracer that calls a function with a single str parameter."""
name: str = "function_callback_handler"
"""The name of the tracer. This is used to identify the tracer in the logs."""
def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
"""Create a FunctionCallbackHandler.
Args:
function: The callback function to call.
"""
super().__init__(**kwargs)
self.function_callback = function
def _persist_run(self, run: Run) -> None:
pass
def get_parents(self, run: Run) -> list[Run]:
"""Get the parents of a run.
Args:
run: The run to get the parents of.
Returns:
A list of parent runs.
"""
parents = []
current_run = run
while current_run.parent_run_id:
parent = self.run_map.get(str(current_run.parent_run_id))
if parent:
parents.append(parent)
current_run = parent
else:
break
return parents
def get_breadcrumbs(self, run: Run) -> str:
"""Get the breadcrumbs of a run.
Args:
run: The run to get the breadcrumbs of.
Returns:
A string with the breadcrumbs of the run.
"""
parents = self.get_parents(run)[::-1]
return " > ".join(
f"{parent.run_type}:{parent.name}"
for i, parent in enumerate([*parents, run])
)
# logging methods
def _on_chain_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n")
+ f"{try_json_stringify(run.inputs, '[inputs]')}"
)
def _on_chain_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[outputs]')}"
)
def _on_chain_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_llm_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
inputs = (
{"prompts": [p.strip() for p in run.inputs["prompts"]]}
if "prompts" in run.inputs
else run.inputs
)
self.function_callback(
f"{get_colored_text('[llm/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n")
+ f"{try_json_stringify(inputs, '[inputs]')}"
)
def _on_llm_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n"
)
+ f"{try_json_stringify(run.outputs, '[response]')}"
)
def _on_llm_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/error]', color='red')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n"
)
+ f"{try_json_stringify(run.error, '[error]')}"
)
def _on_tool_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n")
+ f'"{run.inputs["input"].strip()}"'
)
def _on_tool_end(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f"{get_colored_text('[tool/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n"
)
+ f'"{str(run.outputs["output"]).strip()}"'
)
def _on_tool_error(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/error]', color='red')} "
+ get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ")
+ f"Tool run errored with error:\n"
f"{run.error}"
)
|
FunctionCallbackHandler
|
python
|
TheAlgorithms__Python
|
graphs/minimum_spanning_tree_kruskal2.py
|
{
"start": 83,
"end": 283
}
|
class ____[T]:
# Disjoint Set Node to store the parent and rank
def __init__(self, data: T) -> None:
self.data = data
self.parent = self
self.rank = 0
|
DisjointSetTreeNode
|
python
|
pyparsing__pyparsing
|
examples/adventureEngine.py
|
{
"start": 6591,
"end": 6888
}
|
class ____(Command):
def __init__(self, quals):
super().__init__("LOOK", "looking")
@staticmethod
def help_description():
return "LOOK or L - describes the current room and any objects in it"
def _do_command(self, player):
player.room.describe()
|
LookCommand
|
python
|
jazzband__django-simple-history
|
simple_history/tests/tests/test_models.py
|
{
"start": 44605,
"end": 47282
}
|
class ____(unittest.TestCase):
def verify_custom_model_name_feature(
self, model, expected_class_name, expected_table_name
):
history_model = model.history.model
self.assertEqual(history_model.__name__, expected_class_name)
self.assertEqual(history_model._meta.db_table, expected_table_name)
def test_instantiate_history_model_with_custom_model_name_as_string(self):
try:
from ..models import OverrideModelNameAsString
except ImportError:
self.fail("{}OverrideModelNameAsString is in wrong module")
expected_cls_name = "MyHistoricalCustomNameModel"
self.verify_custom_model_name_feature(
OverrideModelNameAsString(),
expected_cls_name,
f"tests_{expected_cls_name.lower()}",
)
def test_register_history_model_with_custom_model_name_override(self):
try:
from ..models import OverrideModelNameRegisterMethod1
except ImportError:
self.fail("OverrideModelNameRegisterMethod1 is in wrong module")
cls = OverrideModelNameRegisterMethod1()
expected_cls_name = "MyOverrideModelNameRegisterMethod1"
self.verify_custom_model_name_feature(
cls, expected_cls_name, f"tests_{expected_cls_name.lower()}"
)
from simple_history import register
from ..models import OverrideModelNameRegisterMethod2
try:
register(
OverrideModelNameRegisterMethod2,
custom_model_name=lambda x: f"{x}",
)
except ValueError:
self.assertRaises(ValueError)
def test_register_history_model_with_custom_model_name_from_abstract_model(self):
cls = OverrideModelNameUsingBaseModel1
expected_cls_name = f"Audit{cls.__name__}"
self.verify_custom_model_name_feature(
cls, expected_cls_name, "tests_" + expected_cls_name.lower()
)
def test_register_history_model_with_custom_model_name_from_external_model(self):
from ..models import OverrideModelNameUsingExternalModel1
cls = OverrideModelNameUsingExternalModel1
expected_cls_name = f"Audit{cls.__name__}"
self.verify_custom_model_name_feature(
cls, expected_cls_name, "tests_" + expected_cls_name.lower()
)
from ..models import OverrideModelNameUsingExternalModel2
cls = OverrideModelNameUsingExternalModel2
expected_cls_name = f"Audit{cls.__name__}"
self.verify_custom_model_name_feature(
cls, expected_cls_name, "external_" + expected_cls_name.lower()
)
|
CustomModelNameTests
|
python
|
django-guardian__django-guardian
|
guardian/admin.py
|
{
"start": 19597,
"end": 20452
}
|
class ____(forms.Form):
user = forms.CharField(
label=_("User identification"),
max_length=200,
error_messages={"does_not_exist": _("This user does not exist")},
help_text=_("Enter a value compatible with User.USERNAME_FIELD"),
)
def clean_user(self):
"""Returns `User` instance based on the given identification."""
identification = self.cleaned_data["user"]
user_model = get_user_model()
try:
username_field = user_model.USERNAME_FIELD
except AttributeError:
username_field = "username"
try:
user = user_model.objects.get(**{username_field: identification})
return user
except user_model.DoesNotExist:
raise forms.ValidationError(self.fields["user"].error_messages["does_not_exist"])
|
UserManage
|
python
|
pyca__cryptography
|
tests/x509/test_x509_ext.py
|
{
"start": 18669,
"end": 21504
}
|
class ____:
def test_invalid_policy_identifier(self):
with pytest.raises(TypeError):
x509.PolicyInformation("notanoid", None) # type:ignore[arg-type]
def test_none_policy_qualifiers(self):
pi = x509.PolicyInformation(x509.ObjectIdentifier("1.2.3"), None)
assert pi.policy_identifier == x509.ObjectIdentifier("1.2.3")
assert pi.policy_qualifiers is None
def test_policy_qualifiers(self):
pq = ["string"]
pi = x509.PolicyInformation(x509.ObjectIdentifier("1.2.3"), pq)
assert pi.policy_identifier == x509.ObjectIdentifier("1.2.3")
assert pi.policy_qualifiers == pq
def test_invalid_policy_identifiers(self):
with pytest.raises(TypeError):
x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3"),
[1, 2], # type:ignore[list-item]
)
def test_iter_input(self):
qual = ["foo", "bar"]
pi = x509.PolicyInformation(x509.ObjectIdentifier("1.2.3"), iter(qual))
assert pi.policy_qualifiers is not None
assert list(pi.policy_qualifiers) == qual
def test_repr(self):
pq: typing.List[typing.Union[str, x509.UserNotice]] = [
"string",
x509.UserNotice(None, "hi"),
]
pi = x509.PolicyInformation(x509.ObjectIdentifier("1.2.3"), pq)
assert repr(pi) == (
"<PolicyInformation(policy_identifier=<ObjectIdentifier(oid=1."
"2.3, name=Unknown OID)>, policy_qualifiers=['string', <UserNo"
"tice(notice_reference=None, explicit_text='hi')>])>"
)
def test_eq(self):
pi = x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3"),
["string", x509.UserNotice(None, "hi")],
)
pi2 = x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3"),
["string", x509.UserNotice(None, "hi")],
)
assert pi == pi2
def test_ne(self):
pi = x509.PolicyInformation(x509.ObjectIdentifier("1.2.3"), ["string"])
pi2 = x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3"), ["string2"]
)
pi3 = x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3.4"), ["string"]
)
assert pi != pi2
assert pi != pi3
assert pi != object()
def test_hash(self):
pi = x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3"),
["string", x509.UserNotice(None, "hi")],
)
pi2 = x509.PolicyInformation(
x509.ObjectIdentifier("1.2.3"),
["string", x509.UserNotice(None, "hi")],
)
pi3 = x509.PolicyInformation(x509.ObjectIdentifier("1.2.3"), None)
assert hash(pi) == hash(pi2)
assert hash(pi) != hash(pi3)
|
TestPolicyInformation
|
python
|
walkccc__LeetCode
|
solutions/3167. Better Compression of String/3167.py
|
{
"start": 0,
"end": 435
}
|
class ____:
def betterCompression(self, compressed: str) -> str:
count = collections.Counter()
i = 0
while i < len(compressed):
c = compressed[i]
i += 1
freq = 0
while i < len(compressed) and compressed[i].isdigit():
freq = freq * 10 + int(compressed[i])
i += 1
count[c] += freq
return ''.join([c + str(count[c])
for c in sorted(count.keys())])
|
Solution
|
python
|
milvus-io__pymilvus
|
tests/test_search_result.py
|
{
"start": 2447,
"end": 10995
}
|
class ____:
@pytest.mark.parametrize("pk", [
schema_pb2.IDs(int_id=schema_pb2.LongArray(data=list(range(6)))),
schema_pb2.IDs(str_id=schema_pb2.StringArray(data=[str(i*10) for i in range(6)]))
])
@pytest.mark.parametrize("round_decimal", [
None,
-1,
4,
])
def test_search_result_no_fields_data(self, pk, round_decimal):
result = schema_pb2.SearchResultData(
num_queries=2,
top_k=3,
scores=[1.*i for i in range(6)],
ids=pk,
topks=[3, 3],
)
r = SearchResult(result, round_decimal)
# Iterable
assert len(r) == 2
for hits in r:
assert isinstance(hits, (Hits, HybridHits))
assert len(hits.ids) == 3
assert len(hits.distances) == 3
# slicable
assert len(r[1:]) == 1
first_q, _ = r[0], r[1]
assert len(first_q) == 3
assert len(first_q[:]) == 3
assert len(first_q[1:]) == 2
assert len(first_q[2:]) == 1
assert len(first_q[3:]) == 0
LOGGER.info(first_q[:])
LOGGER.info(first_q[1:])
LOGGER.info(first_q[2:])
first_hit = first_q[0]
LOGGER.info(first_hit)
assert first_hit["distance"] == 0.
assert first_hit["entity"] == {}
@pytest.mark.parametrize("pk", [
schema_pb2.IDs(int_id=schema_pb2.LongArray(data=list(range(6)))),
schema_pb2.IDs(str_id=schema_pb2.StringArray(data=[str(i*10) for i in range(6)]))
])
def test_search_result_with_fields_data(self, pk):
fields_data = [
schema_pb2.FieldData(type=DataType.BOOL, field_name="bool_field", field_id=100,
scalars=schema_pb2.ScalarField(bool_data=schema_pb2.BoolArray(data=[True for i in range(6)]))),
schema_pb2.FieldData(type=DataType.INT8, field_name="int8_field", field_id=101,
scalars=schema_pb2.ScalarField(int_data=schema_pb2.IntArray(data=list(range(6))))),
schema_pb2.FieldData(type=DataType.INT16, field_name="int16_field", field_id=102,
scalars=schema_pb2.ScalarField(int_data=schema_pb2.IntArray(data=list(range(6))))),
schema_pb2.FieldData(type=DataType.INT32, field_name="int32_field", field_id=103,
scalars=schema_pb2.ScalarField(int_data=schema_pb2.IntArray(data=list(range(6))))),
schema_pb2.FieldData(type=DataType.INT64, field_name="int64_field", field_id=104,
scalars=schema_pb2.ScalarField(long_data=schema_pb2.LongArray(data=list(range(6))))),
schema_pb2.FieldData(type=DataType.FLOAT, field_name="float_field", field_id=105,
scalars=schema_pb2.ScalarField(float_data=schema_pb2.FloatArray(data=[i*1. for i in range(6)]))),
schema_pb2.FieldData(type=DataType.DOUBLE, field_name="double_field", field_id=106,
scalars=schema_pb2.ScalarField(double_data=schema_pb2.DoubleArray(data=[i*1. for i in range(6)]))),
schema_pb2.FieldData(type=DataType.VARCHAR, field_name="varchar_field", field_id=107,
scalars=schema_pb2.ScalarField(string_data=schema_pb2.StringArray(data=[str(i*10) for i in range(6)]))),
schema_pb2.FieldData(type=DataType.ARRAY, field_name="int16_array_field", field_id=108,
scalars=schema_pb2.ScalarField(
array_data=schema_pb2.ArrayArray(
data=[schema_pb2.ScalarField(int_data=schema_pb2.IntArray(data=list(range(10)))) for i in range(6)],
element_type=DataType.INT16,
),
)),
schema_pb2.FieldData(type=DataType.ARRAY, field_name="int64_array_field", field_id=109,
scalars=schema_pb2.ScalarField(
array_data=schema_pb2.ArrayArray(
data=[schema_pb2.ScalarField(long_data=schema_pb2.LongArray(data=list(range(10)))) for i in range(6)],
element_type=DataType.INT64,
),
)),
schema_pb2.FieldData(type=DataType.ARRAY, field_name="float_array_field", field_id=110,
scalars=schema_pb2.ScalarField(
array_data=schema_pb2.ArrayArray(
data=[schema_pb2.ScalarField(float_data=schema_pb2.FloatArray(data=[j*1. for j in range(10)])) for i in range(6)],
element_type=DataType.FLOAT,
),
)),
schema_pb2.FieldData(type=DataType.ARRAY, field_name="varchar_array_field", field_id=110,
scalars=schema_pb2.ScalarField(
array_data=schema_pb2.ArrayArray(
data=[schema_pb2.ScalarField(string_data=schema_pb2.StringArray(data=[str(j*1.) for j in range(10)])) for i in range(6)],
element_type=DataType.VARCHAR,
),
)),
schema_pb2.FieldData(type=DataType.JSON, field_name="normal_json_field", field_id=111,
scalars=schema_pb2.ScalarField(json_data=schema_pb2.JSONArray(data=[orjson.dumps({str(i): i for i in range(3)}) for i in range(6)])),
),
schema_pb2.FieldData(type=DataType.JSON, field_name="$meta", field_id=112,
is_dynamic=True,
scalars=schema_pb2.ScalarField(json_data=schema_pb2.JSONArray(data=[orjson.dumps({str(i*100): i}) for i in range(6)])),
),
schema_pb2.FieldData(type=DataType.FLOAT_VECTOR, field_name="float_vector_field", field_id=113,
vectors=schema_pb2.VectorField(
dim=4,
float_vector=schema_pb2.FloatArray(data=[random.random() for i in range(24)]),
),
),
schema_pb2.FieldData(type=DataType.BINARY_VECTOR, field_name="binary_vector_field", field_id=114,
vectors=schema_pb2.VectorField(
dim=8,
binary_vector=os.urandom(6),
),
),
schema_pb2.FieldData(type=DataType.FLOAT16_VECTOR, field_name="float16_vector_field", field_id=115,
vectors=schema_pb2.VectorField(
dim=16,
float16_vector=os.urandom(32),
),
),
schema_pb2.FieldData(type=DataType.BFLOAT16_VECTOR, field_name="bfloat16_vector_field", field_id=116,
vectors=schema_pb2.VectorField(
dim=16,
bfloat16_vector=os.urandom(32),
),
),
schema_pb2.FieldData(type=DataType.INT8_VECTOR, field_name="int8_vector_field", field_id=117,
vectors=schema_pb2.VectorField(
dim=16,
int8_vector=os.urandom(32),
),
),
]
result = schema_pb2.SearchResultData(
fields_data=fields_data,
num_queries=2,
top_k=3,
scores=[1.*i for i in range(6)],
ids=pk,
topks=[3, 3],
output_fields=['$meta']
)
r = SearchResult(result)
LOGGER.info(r[0])
assert len(r) == 2
assert 3 == len(r[0]) == len(r[1])
assert r[0][0].get("entity").get("normal_json_field") == {'0': 0, '1': 1, '2': 2}
# dynamic field
assert r[0][1].get("entity").get('100') == 1
assert r[0][0].get("entity").get("int32_field") == 0
assert r[0][1].get("entity").get("int8_field") == 1
assert r[0][2].get("entity").get("int16_field") == 2
assert r[0][1].get("entity").get("int64_array_field") == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert len(r[0][0].get("entity").get("bfloat16_vector_field")) == 32
assert len(r[0][0].get("entity").get("float16_vector_field")) == 32
assert len(r[0][0].get("entity").get("int8_vector_field")) == 16
|
TestSearchResult
|
python
|
html5lib__html5lib-python
|
html5lib/html5parser.py
|
{
"start": 74548,
"end": 77261
}
|
class ____(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
__slots__ = tuple()
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
startTagHandler = _utils.MethodDispatcher([
("html", Phase.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), startTagTableElement)
])
startTagHandler.default = startTagOther
endTagHandler = _utils.MethodDispatcher([
("caption", endTagCaption),
("table", endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), endTagIgnore)
])
endTagHandler.default = endTagOther
|
InCaptionPhase
|
python
|
apache__airflow
|
airflow-core/tests/unit/core/test_settings.py
|
{
"start": 1893,
"end": 2446
}
|
class ____:
def __init__(self, content: str, module_name: str):
self.content = content
self.settings_root = tempfile.mkdtemp()
filename = f"{module_name}.py"
self.settings_file = os.path.join(self.settings_root, filename)
def __enter__(self):
with open(self.settings_file, "w") as handle:
handle.writelines(self.content)
sys.path.append(self.settings_root)
return self.settings_file
def __exit__(self, *exc_info):
sys.path.remove(self.settings_root)
|
SettingsContext
|
python
|
astropy__astropy
|
astropy/modeling/polynomial.py
|
{
"start": 27574,
"end": 30885
}
|
class ____(_PolyDomainWindow1D):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
If None, it is set to (-1, 1)
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
# Set domain separately because it's different from
# the orthogonal polynomials.
self._default_domain_window = {
"domain": (-1, 1),
"window": (-1, 1),
}
self.domain = domain or self._default_domain_window["domain"]
self.window = window or self._default_domain_window["window"]
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
@property
def input_units(self):
if self.degree == 0 or self.c1.input_unit is None:
return None
else:
return {self.inputs[0]: self.c0.input_unit / self.c1.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
par = getattr(self, f"c{i}")
mapping[par.name] = (
outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i
)
return mapping
|
Polynomial1D
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocolExplicit1.py
|
{
"start": 724,
"end": 865
}
|
class ____(Protocol1, Protocol3):
cm1 = 3
def __init__(self):
im1 = 0
# This should generate an error.
Concrete3()
|
Concrete3
|
python
|
huggingface__transformers
|
src/transformers/models/donut/modeling_donut_swin.py
|
{
"start": 35251,
"end": 36043
}
|
class ____(PreTrainedModel):
config: DonutSwinConfig
base_model_prefix = "donut"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["DonutSwinStage"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, DonutSwinEmbeddings):
if module.mask_token is not None:
init.zeros_(module.mask_token)
if module.position_embeddings is not None:
init.zeros_(module.position_embeddings)
elif isinstance(module, DonutSwinSelfAttention):
init.zeros_(module.relative_position_bias_table)
@auto_docstring
|
DonutSwinPreTrainedModel
|
python
|
ray-project__ray
|
python/ray/_private/object_ref_generator.py
|
{
"start": 865,
"end": 10554
}
|
class ____:
"""A generator to obtain object references from a task in a streaming manner.
The class is compatible with the Python generator and async generator interfaces.
The class is not thread-safe.
Do not initialize the class and create an instance directly.
The instance should be created by `.remote`.
.. testcode::
import ray
from typing import Generator
@ray.remote(num_returns="streaming")
def gen() -> Generator[int, None, None]:
for i in range(5):
yield i
obj_ref_gen: ray.ObjectRefGenerator = gen.remote()
for obj_ref in obj_ref_gen:
print("Got:", ray.get(obj_ref))
"""
def __init__(self, generator_ref: "ray.ObjectRef", worker: "Worker"):
# The reference to a generator task.
self._generator_ref = generator_ref
# True if an exception has been raised from the generator task.
self._generator_task_raised = False
# Ray's worker class. ray._private.worker.global_worker
self.worker = worker
self.worker.check_connected()
assert hasattr(worker, "core_worker")
# Public APIs
def __iter__(self) -> "ObjectRefGenerator":
return self
def __next__(self) -> "ray.ObjectRef":
"""Waits until a next ref is available and returns the object ref.
Raises StopIteration if there's no more objects
to generate.
The object ref will contain an exception if the task fails.
When the generator task returns N objects, it can return
up to N + 1 objects (if there's a system failure, the
last object will contain a system level exception).
"""
return self._next_sync()
def send(self, value):
raise NotImplementedError("`gen.send` is not supported.")
def throw(self, value):
raise NotImplementedError("`gen.throw` is not supported.")
def close(self):
raise NotImplementedError("`gen.close` is not supported.")
def __aiter__(self) -> "ObjectRefGenerator":
return self
async def __anext__(self):
return await self._next_async()
async def asend(self, value):
raise NotImplementedError("`gen.asend` is not supported.")
async def athrow(self, value):
raise NotImplementedError("`gen.athrow` is not supported.")
async def aclose(self):
raise NotImplementedError("`gen.aclose` is not supported.")
def completed(self) -> "ray.ObjectRef":
"""Returns an object ref that is ready when
a generator task completes.
If the task is failed unexpectedly (e.g., worker failure),
the `ray.get(gen.completed())` raises an exception.
The function returns immediately.
"""
return self._generator_ref
def next_ready(self) -> bool:
"""If True, it means the output of next(gen) is ready and
ray.get(next(gen)) returns immediately. False otherwise.
It returns False when next(gen) raises a StopIteration
(this condition should be checked using is_finished).
The function returns immediately.
"""
self.worker.check_connected()
core_worker = self.worker.core_worker
if self.is_finished():
return False
expected_ref, is_ready = core_worker.peek_object_ref_stream(self._generator_ref)
if is_ready:
return True
ready, _ = ray.wait([expected_ref], timeout=0, fetch_local=False)
return len(ready) > 0
def is_finished(self) -> bool:
"""If True, it means the generator is finished
and all output is taken. False otherwise.
When True, if next(gen) is called, it will raise StopIteration
or StopAsyncIteration
The function returns immediately.
"""
self.worker.check_connected()
core_worker = self.worker.core_worker
finished = core_worker.is_object_ref_stream_finished(self._generator_ref)
if finished:
if self._generator_task_raised:
return True
else:
# We should try ray.get on a generator ref.
# If it raises an exception and
# _generator_task_raised is not set,
# this means the last ref is not taken yet.
try:
ray.get(self._generator_ref)
except Exception:
# The exception from _generator_ref
# hasn't been taken yet.
return False
else:
return True
else:
return False
# Private APIs
def _get_next_ref(self) -> "ray.ObjectRef":
"""Return the next reference from a generator.
Note that the ObjectID generated from a generator
is always deterministic.
"""
self.worker.check_connected()
core_worker = self.worker.core_worker
return core_worker.peek_object_ref_stream(self._generator_ref)[0]
def _next_sync(self, timeout_s: Optional[int | float] = None) -> "ray.ObjectRef":
"""Waits for timeout_s and returns the object ref if available.
If an object is not available within the given timeout, it
returns a nil object reference.
If -1 timeout is provided, it means it waits infinitely.
Waiting is implemented as busy waiting.
Raises StopIteration if there's no more objects
to generate.
The object ref will contain an exception if the task fails.
When the generator task returns N objects, it can return
up to N + 1 objects (if there's a system failure, the
last object will contain a system level exception).
Args:
timeout_s: If the next object is not ready within
this timeout, it returns the nil object ref.
Returns:
ObjectRef corresponding to the next result in the stream.
"""
core_worker = self.worker.core_worker
# Wait for the next ObjectRef to become ready.
expected_ref, is_ready = core_worker.peek_object_ref_stream(self._generator_ref)
if not is_ready:
_, unready = ray.wait([expected_ref], timeout=timeout_s, fetch_local=False)
if len(unready) > 0:
return ray.ObjectRef.nil()
try:
ref = core_worker.try_read_next_object_ref_stream(self._generator_ref)
assert not ref.is_nil()
except ObjectRefStreamEndOfStreamError:
if self._generator_task_raised:
# Exception has been returned.
raise StopIteration from None
try:
# The generator ref contains an exception
# if there's any failure. It contains nothing otherwise.
# In that case, it should raise StopIteration.
ray.get(self._generator_ref)
except Exception:
self._generator_task_raised = True
return self._generator_ref
else:
# The task finished without an exception.
raise StopIteration from None
return ref
async def _suppress_exceptions(self, ref: "ray.ObjectRef") -> None:
# Wrap a streamed ref to avoid asyncio warnings about not retrieving
# the exception when we are just waiting for the ref to become ready.
# The exception will get returned (or warned) to the user once they
# actually await the ref.
try:
await ref
except Exception:
pass
async def _next_async(self, timeout_s: Optional[int | float] = None):
"""Same API as _next_sync, but it is for async context."""
core_worker = self.worker.core_worker
ref, is_ready = core_worker.peek_object_ref_stream(self._generator_ref)
if not is_ready:
# TODO(swang): Avoid fetching the value.
_, unready = await asyncio.wait(
[asyncio.create_task(self._suppress_exceptions(ref))], timeout=timeout_s
)
if len(unready) > 0:
return ray.ObjectRef.nil()
try:
ref = core_worker.try_read_next_object_ref_stream(self._generator_ref)
assert not ref.is_nil()
except ObjectRefStreamEndOfStreamError:
if self._generator_task_raised:
# Exception has been returned.
raise StopAsyncIteration from None
try:
# The generator ref contains an exception
# if there's any failure. It contains nothing otherwise.
# In that case, it should raise StopSyncIteration.
await self._generator_ref
except Exception:
self._generator_task_raised = True
return self._generator_ref
else:
# Meaning the task succeed without failure raise StopAsyncIteration.
raise StopAsyncIteration from None
return ref
def __del__(self):
if hasattr(self.worker, "core_worker"):
# The stream is created when a task is first submitted.
# NOTE: This can be called multiple times
# because python doesn't guarantee __del__ is called
# only once.
self.worker.core_worker.async_delete_object_ref_stream(self._generator_ref)
def __getstate__(self):
raise TypeError(
"You cannot return or pass a generator to other task. "
"Serializing a ObjectRefGenerator is not allowed."
)
|
ObjectRefGenerator
|
python
|
django-haystack__django-haystack
|
test_haystack/discovery/models.py
|
{
"start": 183,
"end": 338
}
|
class ____(models.Model):
author = models.CharField(max_length=255)
content = models.TextField()
def __str__(self):
return self.author
|
Bar
|
python
|
gevent__gevent
|
src/gevent/lock.py
|
{
"start": 1712,
"end": 3593
}
|
class ____(object):
__slots__ = (
'_owned_thread_id',
'_gil',
'_atomic',
'_recursion_depth',
)
# Don't allow re-entry to these functions in a single thread, as
# can happen if a sys.settrace is used. (XXX: What does that even
# mean? Our original implementation that did that has been
# replaced by something more robust)
#
# This is essentially a variant of the (pure-Python) RLock from the
# standard library.
def __init__(self):
self._owned_thread_id = None
self._gil = _allocate_lock()
self._atomic = _allocate_lock()
self._recursion_depth = 0
@atomic
def acquire(self):
current_tid = _get_ident()
if self._owned_thread_id == current_tid:
self._recursion_depth += 1
return True
# Not owned by this thread. Only one thread will make it through this point.
while 1:
self._atomic.release()
try:
self._gil.acquire()
finally:
self._atomic.acquire()
if self._owned_thread_id is None:
break
self._owned_thread_id = current_tid
self._recursion_depth = 1
return True
@atomic
def release(self):
current_tid = _get_ident()
if current_tid != self._owned_thread_id:
raise RuntimeError("%s: Releasing lock not owned by you. You: 0x%x; Owner: 0x%x" % (
self,
current_tid, self._owned_thread_id or 0,
))
self._recursion_depth -= 1
if not self._recursion_depth:
self._owned_thread_id = None
self._gil.release()
def __enter__(self):
self.acquire()
def __exit__(self, t, v, tb):
self.release()
def locked(self):
return self._gil.locked()
|
_GILLock
|
python
|
PyCQA__isort
|
isort/exceptions.py
|
{
"start": 2161,
"end": 2532
}
|
class ____(FileSkipped):
"""Raised when an entire file is skipped due to provided isort settings"""
def __init__(self, file_path: str, **kwargs: str):
super().__init__(
f"{file_path} was skipped as it's listed in 'skip' setting"
" or matches a glob in 'skip_glob' setting",
file_path=file_path,
)
|
FileSkipSetting
|
python
|
django__django
|
django/contrib/postgres/aggregates/general.py
|
{
"start": 822,
"end": 910
}
|
class ____(Aggregate):
function = "BOOL_AND"
output_field = BooleanField()
|
BoolAnd
|
python
|
ray-project__ray
|
python/ray/_common/tests/test_signature.py
|
{
"start": 9335,
"end": 11724
}
|
class ____:
"""Tests for the flatten_args utility function."""
def test_only_positional_args(self):
"""Test flattening with only positional arguments."""
def test_func(a, b, c):
return a + b + c
params = extract_signature(test_func)
flattened = flatten_args(params, (1, 2, 3), {})
expected = [DUMMY_TYPE, 1, DUMMY_TYPE, 2, DUMMY_TYPE, 3]
assert flattened == expected
def test_only_keyword_args(self):
"""Test flattening with only keyword arguments."""
def test_func(a=1, b=2, c=3):
return a + b + c
params = extract_signature(test_func)
flattened = flatten_args(params, (), {"a": 10, "b": 20, "c": 30})
expected = ["a", 10, "b", 20, "c", 30]
assert flattened == expected
def test_mixed_args(self):
"""Test flattening with mixed positional and keyword arguments."""
def test_func(a, b, c=30):
return a + b + c
params = extract_signature(test_func)
flattened = flatten_args(params, (1, 2), {"c": 3})
expected = [DUMMY_TYPE, 1, DUMMY_TYPE, 2, "c", 3]
assert flattened == expected
def test_empty_args(self):
"""Test flattening with no arguments."""
def test_func():
return "hello"
params = extract_signature(test_func)
flattened = flatten_args(params, (), {})
assert flattened == []
def test_complex_types(self):
"""Test flattening with complex argument types."""
def test_func(a, b, c=None):
return a + b
params = extract_signature(test_func)
complex_args = ([1, 2, 3], {"key": "value"})
complex_kwargs = {"c": {"nested": "dict"}}
flattened = flatten_args(params, complex_args, complex_kwargs)
expected = [
DUMMY_TYPE,
[1, 2, 3],
DUMMY_TYPE,
{"key": "value"},
"c",
{"nested": "dict"},
]
assert flattened == expected
def test_invalid_args_raises_error(self):
"""Test that invalid arguments raise TypeError during flattening."""
def test_func(a, b):
return a + b
params = extract_signature(test_func)
with pytest.raises(TypeError):
flatten_args(params, (1, 2, 3), {}) # Too many args
|
TestFlattenArgs
|
python
|
ray-project__ray
|
python/ray/dashboard/memory_utils.py
|
{
"start": 1361,
"end": 2622
}
|
class ____(Enum):
# We don't use enum because enum is not json serializable.
ACTOR_HANDLE = "ACTOR_HANDLE"
PINNED_IN_MEMORY = "PINNED_IN_MEMORY"
LOCAL_REFERENCE = "LOCAL_REFERENCE"
USED_BY_PENDING_TASK = "USED_BY_PENDING_TASK"
CAPTURED_IN_OBJECT = "CAPTURED_IN_OBJECT"
UNKNOWN_STATUS = "UNKNOWN_STATUS"
def get_sorting_type(sort_by: str):
"""Translate string input into SortingType instance"""
sort_by = sort_by.upper()
if sort_by == "PID":
return SortingType.PID
elif sort_by == "OBJECT_SIZE":
return SortingType.OBJECT_SIZE
elif sort_by == "REFERENCE_TYPE":
return SortingType.REFERENCE_TYPE
else:
raise Exception(
"The sort-by input provided is not one of\
PID, OBJECT_SIZE, or REFERENCE_TYPE."
)
def get_group_by_type(group_by: str):
"""Translate string input into GroupByType instance"""
group_by = group_by.upper()
if group_by == "NODE_ADDRESS":
return GroupByType.NODE_ADDRESS
elif group_by == "STACK_TRACE":
return GroupByType.STACK_TRACE
else:
raise Exception(
"The group-by input provided is not one of\
NODE_ADDRESS or STACK_TRACE."
)
|
ReferenceType
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/aggregate.py
|
{
"start": 2043,
"end": 2190
}
|
class ____:
"""The aggregation result for a collection."""
properties: AProperties
total_count: Optional[int]
@dataclass
|
AggregateReturn
|
python
|
getsentry__sentry
|
tests/sentry/db/test_transactions.py
|
{
"start": 926,
"end": 4927
}
|
class ____:
def test_collect_transaction_queries(self) -> None:
with collect_transaction_queries() as queries, outbox_context(flush=False):
Organization.objects.filter(name="org1").first()
User.objects.filter(username="user1").first()
with transaction.atomic(using=router.db_for_write(Organization)):
with pytest.raises(MaxSnowflakeRetryError):
Organization.objects.create(name=None) # type: ignore[misc] # intentional to trigger error
with transaction.atomic(using=router.db_for_write(Organization)):
Organization.objects.create(name="org3")
with transaction.atomic(using=router.db_for_write(User)):
User.objects.create(username="user2")
User.objects.create(username="user3")
assert [(s["transaction"]) for s in queries] == [None, "default", "default", "control"]
def test_bad_transaction_boundaries(self) -> None:
org = Factories.create_organization()
Factories.create_project(organization=org)
Factories.create_user()
with pytest.raises(AssertionError):
with transaction.atomic(using=router.db_for_write(User)):
Factories.create_project(organization=org)
def test_safe_transaction_boundaries(self) -> None:
org = Factories.create_organization()
Factories.create_project(organization=org)
Factories.create_user()
with transaction.atomic(using=router.db_for_write(Organization)):
Factories.create_project(organization=org)
with django_test_transaction_water_mark():
Factories.create_user()
with (
django_test_transaction_water_mark(),
transaction.atomic(using=router.db_for_write(User)),
):
Factories.create_user()
with django_test_transaction_water_mark():
Factories.create_project(organization=org)
Factories.create_user()
with django_test_transaction_water_mark():
Factories.create_project(organization=org)
Factories.create_user()
Factories.create_project(organization=org)
with django_test_transaction_water_mark():
Factories.create_user()
def test_in_test_assert_no_transaction(self) -> None:
def do_assertions():
in_test_assert_no_transaction("Not, in transaction, should not fail")
with pytest.raises(AssertionError):
with transaction.atomic("default"):
in_test_assert_no_transaction("In transaction, should assert")
with transaction.atomic("default"):
with in_test_hide_transaction_boundary():
in_test_assert_no_transaction("Guarded, should not assert")
do_assertions()
with transaction.atomic("default"), django_test_transaction_water_mark():
do_assertions()
def test_transaction_on_commit(self) -> None:
def do_assertions():
calls = []
transaction.on_commit(lambda: calls.append("a"), "default")
with transaction.atomic("default"):
with transaction.atomic("default"):
with pytest.raises(AssertionError):
with transaction.atomic("default"):
transaction.on_commit(lambda: calls.append("no go"), "default")
raise AssertionError("Oh no!")
transaction.on_commit(lambda: calls.append("b"), "default")
transaction.on_commit(lambda: calls.append("c"), "default")
assert calls == ["a"]
assert calls == ["a", "b", "c"]
do_assertions()
with transaction.atomic("default"), django_test_transaction_water_mark():
do_assertions()
@no_silo_test
|
CaseMixin
|
python
|
realpython__materials
|
python-class/vehicles.py
|
{
"start": 341,
"end": 686
}
|
class ____(Vehicle):
def __init__(self, make, model, year, num_seats):
super().__init__(make, model, year)
self.num_seats = num_seats
def drive(self):
print(f'Driving my "{self.make} - {self.model}" on the road')
def __str__(self):
return f'"{self.make} - {self.model}" has {self.num_seats} seats'
|
Car
|
python
|
pydantic__pydantic
|
pydantic/v1/networks.py
|
{
"start": 11890,
"end": 11978
}
|
class ____(AnyUrl):
allowed_schemes = {'http', 'https'}
__slots__ = ()
|
AnyHttpUrl
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_dynamic.py
|
{
"start": 45631,
"end": 51129
}
|
class ____(
_WriteOnlyFixture,
_UOWTests,
_fixtures.FixtureTest,
testing.AssertsExecutionResults,
):
__sparse_driver_backend__ = True
@testing.fixture
def passive_deletes_fixture(self, decl_base, connection):
"""passive deletes fixture
this fixture is separate from the FixtureTest setup because we need
to produce the related Table using ON DELETE cascade for the
foreign key.
"""
def go(passive_deletes, cascade_deletes):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(Identity(), primary_key=True)
data: Mapped[str]
bs: WriteOnlyMapped["B"] = relationship( # noqa: F821
passive_deletes=passive_deletes,
cascade=(
"all, delete-orphan"
if cascade_deletes
else "save-update, merge"
),
order_by="B.id",
)
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(Identity(), primary_key=True)
a_id: Mapped[int] = mapped_column(
ForeignKey(
"a.id",
ondelete="cascade" if cascade_deletes else "set null",
),
nullable=not cascade_deletes,
)
decl_base.metadata.create_all(connection)
return A, B
yield go
@testing.combinations(
"empty", "persistent", "transient", argnames="merge_type"
)
def test_merge_persistent(self, merge_type, user_address_fixture):
addresses = self.tables.addresses
User, Address = user_address_fixture(
addresses_args={"order_by": addresses.c.email_address}
)
sess = fixture_session(autoflush=False)
a1 = Address(email_address="a1")
a2 = Address(email_address="a2")
a3 = Address(email_address="a3")
u1 = User(name="jack", addresses=[a2, a3])
if merge_type == "transient":
# merge transient. no collection iteration is implied by this.
u1 = sess.merge(u1)
sess.add(a1)
else:
sess.add_all([u1, a1])
sess.flush()
if merge_type == "persistent":
u1 = User(id=u1.id, name="jane", addresses=[a1, a3])
# merge of populated list into persistent not supported with
# write_only because we would need to iterate the existing list
with self._expect_no_iteration():
u1 = sess.merge(u1)
elif merge_type == "empty":
# merge while omitting the "too large to load" collection
# works fine.
u1 = User(id=u1.id, name="jane")
u1 = sess.merge(u1)
eq_(
attributes.get_history(
u1, "addresses", PassiveFlag.PASSIVE_NO_FETCH
),
([], [], []),
)
sess.flush()
eq_(sess.scalars(u1.addresses.select()).all(), [a2, a3])
def test_passive_deletes_required(self, user_address_fixture):
addresses = self.tables.addresses
User, Address = user_address_fixture(
addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "save-update",
}
)
sess = fixture_session(
autoflush=True,
)
u = User(
name="ed",
addresses=[Address(email_address=letter) for letter in "abcdef"],
)
sess.add(u)
sess.commit()
sess.delete(u)
with expect_raises_message(
exc.InvalidRequestError,
"Attribute User.addresses can't load the existing state from the "
"database for this operation; full iteration is not permitted.",
):
sess.commit()
@testing.combinations(True, False, argnames="cascade_deletes")
def test_passive_deletes_succeed(
self, passive_deletes_fixture, connection, cascade_deletes
):
A, B = passive_deletes_fixture(True, cascade_deletes)
sess = fixture_session(bind=connection)
a1 = A(data="d1", bs=[B(), B(), B()])
sess.add(a1)
sess.commit()
sess.delete(a1)
sess.commit()
if testing.requires.foreign_keys.enabled and cascade_deletes:
eq_(sess.scalar(select(func.count()).select_from(B)), 0)
else:
eq_(sess.scalar(select(func.count()).select_from(B)), 3)
@testing.combinations(True, False, argnames="cascade_deletes")
def test_remove_orphans(
self, passive_deletes_fixture, connection, cascade_deletes
):
A, B = passive_deletes_fixture(True, cascade_deletes)
sess = fixture_session(bind=connection)
b1, b2, b3 = B(), B(), B()
a1 = A(data="d1", bs=[b1, b2, b3])
sess.add(a1)
sess.commit()
eq_(sess.scalars(a1.bs.select()).all(), [b1, b2, b3])
a1.bs.remove(b2)
sess.commit()
eq_(sess.scalars(a1.bs.select()).all(), [b1, b3])
if cascade_deletes:
eq_(sess.scalar(select(func.count()).select_from(B)), 2)
else:
eq_(sess.scalar(select(func.count()).select_from(B)), 3)
|
WriteOnlyUOWTest
|
python
|
dask__distributed
|
distributed/shuffle/tests/test_shuffle.py
|
{
"start": 96825,
"end": 98815
}
|
class ____(_ShuffleRunManager):
def __init__(self, plugin):
super().__init__(plugin)
self.in_fetch = asyncio.Event()
self.block_fetch = asyncio.Event()
async def _fetch(self, *args, **kwargs):
result = await super()._fetch(*args, **kwargs)
self.in_fetch.set()
await self.block_fetch.wait()
return result
@mock.patch(
"distributed.shuffle.ShuffleSchedulerPlugin",
RequestCountingSchedulerPlugin,
)
@mock.patch(
"distributed.shuffle._worker_plugin._ShuffleRunManager",
PostFetchBlockingManager,
)
@gen_cluster(
client=True,
nthreads=[("", 2)] * 2,
config={
"distributed.scheduler.allowed-failures": 0,
"distributed.p2p.comm.message-size-limit": "10 B",
},
)
async def test_workers_do_not_spam_get_requests(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-02-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
s.remove_plugin("shuffle")
shuffle_extS = RequestCountingSchedulerPlugin(s)
shuffle_extA = a.plugins["shuffle"]
shuffle_extB = b.plugins["shuffle"]
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", npartitions=100, force=True)
out = c.compute(out.x.size)
shuffle_id = await wait_until_new_shuffle_is_initialized(s)
key = barrier_key(shuffle_id)
await shuffle_extA.shuffle_runs.in_fetch.wait()
await shuffle_extB.shuffle_runs.in_fetch.wait()
shuffle_extA.shuffle_runs.block_fetch.set()
barrier_task = s.tasks[key]
while any(
ts.state not in ("processing", "memory") for ts in barrier_task.dependencies
):
await asyncio.sleep(0.1)
shuffle_extB.shuffle_runs.block_fetch.set()
await out
assert sum(shuffle_extS.counts.values()) == 2
del out
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
|
PostFetchBlockingManager
|
python
|
PyCQA__pylint
|
tests/functional/c/class_protocol_ellipsis.py
|
{
"start": 164,
"end": 922
}
|
class ____:
"""The "invalid-*-returned" messages shouldn't be emitted for stub functions
Original issue: https://github.com/pylint-dev/pylint/issues/4736"""
def __len__(self) -> int:
...
def __hash__(self) -> int:
...
def __index__(self) -> int:
...
def __iter__(self) -> Iterator[Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> object:
...
def __str__(self) -> str:
...
def __bytes__(self) -> bytes:
...
def __length_hint__(self) -> int:
...
def __format__(self, format_spec: str) -> str:
...
def __getnewargs__(self) -> tuple:
...
def __getnewargs_ex__(self) -> tuple:
...
|
MyClass
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_syntax_extensions.py
|
{
"start": 8638,
"end": 9988
}
|
class ____(
fixtures.CacheKeyFixture, fixtures.TestBase, AssertsCompiledSQL
):
__dialect__ = "default"
def test_render(self):
t = Table(
"t1", MetaData(), Column("c1", Integer), Column("c2", Integer)
)
stmt = select(t).ext(ColumnExpressionExt(t.c.c1, t.c.c2))
self.assert_compile(
stmt,
"SELECT COLUMN EXPRESSIONS (t1.c1, t1.c2) t1.c1, t1.c2 FROM t1",
)
def test_adaptation(self):
t = Table(
"t1", MetaData(), Column("c1", Integer), Column("c2", Integer)
)
s1 = select(t).subquery()
s2 = select(t).ext(ColumnExpressionExt(t.c.c1, t.c.c2))
s3 = sql_util.ClauseAdapter(s1).traverse(s2)
self.assert_compile(
s3,
"SELECT COLUMN EXPRESSIONS (anon_1.c1, anon_1.c2) "
"anon_1.c1, anon_1.c2 FROM "
"(SELECT t1.c1 AS c1, t1.c2 AS c2 FROM t1) AS anon_1",
)
def test_compare(self):
t = Table(
"t1", MetaData(), Column("c1", Integer), Column("c2", Integer)
)
self._run_compare_fixture(
lambda: (
select(t).ext(ColumnExpressionExt(t.c.c1, t.c.c2)),
select(t).ext(ColumnExpressionExt(t.c.c1)),
select(t),
)
)
|
TestExpressionExtensions
|
python
|
explosion__spaCy
|
spacy/lang/he/__init__.py
|
{
"start": 117,
"end": 298
}
|
class ____(BaseDefaults):
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
|
HebrewDefaults
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/auth.py
|
{
"start": 8286,
"end": 10076
}
|
class ____(Response):
"""
Response of auth.edit_user endpoint.
:param updated: Number of users updated (0 or 1)
:type updated: float
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "auth"
_action = "edit_user"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of users updated (0 or 1)",
"enum": [0, 1],
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[float] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditUserResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[float]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[float]) -> None:
if value is None:
self._property_updated = None
return
self.assert_isinstance(value, "updated", six.integer_types + (float,))
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
|
EditUserResponse
|
python
|
django__django
|
tests/bash_completion/tests.py
|
{
"start": 253,
"end": 3911
}
|
class ____(unittest.TestCase):
"""
Testing the Python level bash completion code.
This requires setting up the environment as if we got passed data
from bash.
"""
def setUp(self):
self.old_DJANGO_AUTO_COMPLETE = os.environ.get("DJANGO_AUTO_COMPLETE")
os.environ["DJANGO_AUTO_COMPLETE"] = "1"
def tearDown(self):
if self.old_DJANGO_AUTO_COMPLETE:
os.environ["DJANGO_AUTO_COMPLETE"] = self.old_DJANGO_AUTO_COMPLETE
else:
del os.environ["DJANGO_AUTO_COMPLETE"]
def _user_input(self, input_str):
"""
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
"""
os.environ["COMP_WORDS"] = input_str
idx = len(input_str.split(" ")) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(" ") else idx
os.environ["COMP_CWORD"] = str(comp_cword)
sys.argv = input_str.split()
def _run_autocomplete(self):
util = ManagementUtility(argv=sys.argv)
with captured_stdout() as stdout:
try:
util.autocomplete()
except SystemExit:
pass
return stdout.getvalue().strip().split("\n")
def test_django_admin_py(self):
"django_admin.py will autocomplete option flags"
self._user_input("django-admin sqlmigrate --verb")
output = self._run_autocomplete()
self.assertEqual(output, ["--verbosity="])
def test_manage_py(self):
"manage.py will autocomplete option flags"
self._user_input("manage.py sqlmigrate --verb")
output = self._run_autocomplete()
self.assertEqual(output, ["--verbosity="])
def test_custom_command(self):
"A custom command can autocomplete option flags"
self._user_input("django-admin test_command --l")
output = self._run_autocomplete()
self.assertEqual(output, ["--list"])
def test_subcommands(self):
"Subcommands can be autocompleted"
self._user_input("django-admin sql")
output = self._run_autocomplete()
self.assertEqual(output, ["sqlflush sqlmigrate sqlsequencereset"])
def test_completed_subcommand(self):
"Show option flags in case a subcommand is completed"
self._user_input("django-admin startproject ") # Trailing whitespace
output = self._run_autocomplete()
for item in output:
self.assertTrue(item.startswith("--"))
def test_help(self):
"No errors, just an empty list if there are no autocomplete options"
self._user_input("django-admin help --")
output = self._run_autocomplete()
self.assertEqual(output, [""])
def test_app_completion(self):
"Application names will be autocompleted for an AppCommand"
self._user_input("django-admin sqlmigrate a")
output = self._run_autocomplete()
a_labels = sorted(
app_config.label
for app_config in apps.get_app_configs()
if app_config.label.startswith("a")
)
self.assertEqual(output, a_labels)
|
BashCompletionTests
|
python
|
modin-project__modin
|
stress_tests/kaggle/kaggle4.py
|
{
"start": 10067,
"end": 12565
}
|
class ____(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, base_models, meta_model, n_folds=5):
self.base_models = base_models
self.meta_model = meta_model
self.n_folds = n_folds
def fit(self, X, y):
self.base_models_ = [[] for _ in self.base_models]
self.meta_model_ = clone(self.meta_model)
kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_index, holdout_index in kfold.split(X, y):
instance = clone(model)
self.base_models_[i].append(instance)
instance.fit(X[train_index], y[train_index])
y_pred = instance.predict(X[holdout_index])
out_of_fold_predictions[holdout_index, i] = y_pred
self.meta_model_.fit(out_of_fold_predictions, y)
return self
def predict(self, X):
meta_features = np.column_stack(
[
np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
for base_models in self.base_models_
]
)
return self.meta_model_.predict(meta_features)
stacked_averaged_models = StackingAveragedModels(
base_models=(ENet, GBoost, KRR), meta_model=lasso
)
score = rmsle_cv(stacked_averaged_models)
print(
"Stacking Averaged models score: {:.4f} ({:.4f})".format(score.mean(), score.std())
)
def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
stacked_averaged_models.fit(train.values, y_train)
stacked_train_pred = stacked_averaged_models.predict(train.values)
stacked_pred = np.expm1(stacked_averaged_models.predict(test.values))
print(rmsle(y_train, stacked_train_pred))
model_xgb.fit(train, y_train)
xgb_train_pred = model_xgb.predict(train)
xgb_pred = np.expm1(model_xgb.predict(test))
print(rmsle(y_train, xgb_train_pred))
model_lgb.fit(train, y_train)
lgb_train_pred = model_lgb.predict(train)
lgb_pred = np.expm1(model_lgb.predict(test.values))
print(rmsle(y_train, lgb_train_pred))
print("RMSLE score on train data:")
print(
rmsle(
y_train,
stacked_train_pred * 0.70 + xgb_train_pred * 0.15 + lgb_train_pred * 0.15,
)
)
ensemble = stacked_pred * 0.70 + xgb_pred * 0.15 + lgb_pred * 0.15
sub = pd.DataFrame()
sub["Id"] = test_ID
sub["SalePrice"] = ensemble
sub.to_csv("submission.csv", index=False)
|
StackingAveragedModels
|
python
|
django__django
|
tests/schema/models.py
|
{
"start": 1237,
"end": 1382
}
|
class ____(models.Model):
name = models.CharField(max_length=255, db_index=True)
class Meta:
apps = new_apps
|
AuthorWithIndexedName
|
python
|
aio-libs__aiohttp
|
aiohttp/client_proto.py
|
{
"start": 584,
"end": 12098
}
|
class ____(BaseProtocol, DataQueue[tuple[RawResponseMessage, StreamReader]]):
"""Helper class to adapt between Protocol and StreamReader."""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
BaseProtocol.__init__(self, loop=loop)
DataQueue.__init__(self, loop)
self._should_close = False
self._payload: StreamReader | None = None
self._skip_payload = False
self._payload_parser: WebSocketReader | None = None
self._timer = None
self._tail = b""
self._upgraded = False
self._parser: HttpResponseParser | None = None
self._read_timeout: float | None = None
self._read_timeout_handle: asyncio.TimerHandle | None = None
self._timeout_ceil_threshold: float | None = 5
self._closed: None | asyncio.Future[None] = None
self._connection_lost_called = False
@property
def closed(self) -> None | asyncio.Future[None]:
"""Future that is set when the connection is closed.
This property returns a Future that will be completed when the connection
is closed. The Future is created lazily on first access to avoid creating
futures that will never be awaited.
Returns:
- A Future[None] if the connection is still open or was closed after
this property was accessed
- None if connection_lost() was already called before this property
was ever accessed (indicating no one is waiting for the closure)
"""
if self._closed is None and not self._connection_lost_called:
self._closed = self._loop.create_future()
return self._closed
@property
def upgraded(self) -> bool:
return self._upgraded
@property
def should_close(self) -> bool:
return bool(
self._should_close
or (self._payload is not None and not self._payload.is_eof())
or self._upgraded
or self._exception is not None
or self._payload_parser is not None
or self._buffer
or self._tail
)
def force_close(self) -> None:
self._should_close = True
def close(self) -> None:
self._exception = None # Break cyclic references
transport = self.transport
if transport is not None:
transport.close()
self.transport = None
self._payload = None
self._drop_timeout()
def abort(self) -> None:
self._exception = None # Break cyclic references
transport = self.transport
if transport is not None:
transport.abort()
self.transport = None
self._payload = None
self._drop_timeout()
def is_connected(self) -> bool:
return self.transport is not None and not self.transport.is_closing()
def connection_lost(self, exc: BaseException | None) -> None:
self._connection_lost_called = True
self._drop_timeout()
original_connection_error = exc
reraised_exc = original_connection_error
connection_closed_cleanly = original_connection_error is None
if self._closed is not None:
# If someone is waiting for the closed future,
# we should set it to None or an exception. If
# self._closed is None, it means that
# connection_lost() was called already
# or nobody is waiting for it.
if connection_closed_cleanly:
set_result(self._closed, None)
else:
assert original_connection_error is not None
set_exception(
self._closed,
ClientConnectionError(
f"Connection lost: {original_connection_error !s}",
),
original_connection_error,
)
if self._payload_parser is not None:
with suppress(Exception): # FIXME: log this somehow?
self._payload_parser.feed_eof()
uncompleted = None
if self._parser is not None:
try:
uncompleted = self._parser.feed_eof()
except Exception as underlying_exc:
if self._payload is not None:
client_payload_exc_msg = (
f"Response payload is not completed: {underlying_exc !r}"
)
if not connection_closed_cleanly:
client_payload_exc_msg = (
f"{client_payload_exc_msg !s}. "
f"{original_connection_error !r}"
)
set_exception(
self._payload,
ClientPayloadError(client_payload_exc_msg),
underlying_exc,
)
if not self.is_eof():
if isinstance(original_connection_error, OSError):
reraised_exc = ClientOSError(*original_connection_error.args)
if connection_closed_cleanly:
reraised_exc = ServerDisconnectedError(uncompleted)
# assigns self._should_close to True as side effect,
# we do it anyway below
underlying_non_eof_exc = (
_EXC_SENTINEL
if connection_closed_cleanly
else original_connection_error
)
assert underlying_non_eof_exc is not None
assert reraised_exc is not None
self.set_exception(reraised_exc, underlying_non_eof_exc)
self._should_close = True
self._parser = None
self._payload = None
self._payload_parser = None
self._reading_paused = False
super().connection_lost(reraised_exc)
def eof_received(self) -> None:
# should call parser.feed_eof() most likely
self._drop_timeout()
def pause_reading(self) -> None:
super().pause_reading()
self._drop_timeout()
def resume_reading(self) -> None:
super().resume_reading()
self._reschedule_timeout()
def set_exception(
self,
exc: type[BaseException] | BaseException,
exc_cause: BaseException = _EXC_SENTINEL,
) -> None:
self._should_close = True
self._drop_timeout()
super().set_exception(exc, exc_cause)
def set_parser(self, parser: Any, payload: Any) -> None:
# TODO: actual types are:
# parser: WebSocketReader
# payload: WebSocketDataQueue
# but they are not generi enough
# Need an ABC for both types
self._payload = payload
self._payload_parser = parser
self._drop_timeout()
if self._tail:
data, self._tail = self._tail, b""
self.data_received(data)
def set_response_params(
self,
*,
timer: BaseTimerContext | None = None,
skip_payload: bool = False,
read_until_eof: bool = False,
auto_decompress: bool = True,
read_timeout: float | None = None,
read_bufsize: int = 2**16,
timeout_ceil_threshold: float = 5,
max_line_size: int = 8190,
max_field_size: int = 8190,
) -> None:
self._skip_payload = skip_payload
self._read_timeout = read_timeout
self._timeout_ceil_threshold = timeout_ceil_threshold
self._parser = HttpResponseParser(
self,
self._loop,
read_bufsize,
timer=timer,
payload_exception=ClientPayloadError,
response_with_body=not skip_payload,
read_until_eof=read_until_eof,
auto_decompress=auto_decompress,
max_line_size=max_line_size,
max_field_size=max_field_size,
)
if self._tail:
data, self._tail = self._tail, b""
self.data_received(data)
def _drop_timeout(self) -> None:
if self._read_timeout_handle is not None:
self._read_timeout_handle.cancel()
self._read_timeout_handle = None
def _reschedule_timeout(self) -> None:
timeout = self._read_timeout
if self._read_timeout_handle is not None:
self._read_timeout_handle.cancel()
if timeout:
self._read_timeout_handle = self._loop.call_later(
timeout, self._on_read_timeout
)
else:
self._read_timeout_handle = None
def start_timeout(self) -> None:
self._reschedule_timeout()
@property
def read_timeout(self) -> float | None:
return self._read_timeout
@read_timeout.setter
def read_timeout(self, read_timeout: float | None) -> None:
self._read_timeout = read_timeout
def _on_read_timeout(self) -> None:
exc = SocketTimeoutError("Timeout on reading data from socket")
self.set_exception(exc)
if self._payload is not None:
set_exception(self._payload, exc)
def data_received(self, data: bytes) -> None:
self._reschedule_timeout()
if not data:
return
# custom payload parser - currently always WebSocketReader
if self._payload_parser is not None:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self._payload = None
self._payload_parser = None
if tail:
self.data_received(tail)
return
if self._upgraded or self._parser is None:
# i.e. websocket connection, websocket parser is not set yet
self._tail += data
return
# parse http messages
try:
messages, upgraded, tail = self._parser.feed_data(data)
except BaseException as underlying_exc:
if self.transport is not None:
# connection.release() could be called BEFORE
# data_received(), the transport is already
# closed in this case
self.transport.close()
# should_close is True after the call
if isinstance(underlying_exc, HttpProcessingError):
exc = HttpProcessingError(
code=underlying_exc.code,
message=underlying_exc.message,
headers=underlying_exc.headers,
)
else:
exc = HttpProcessingError()
self.set_exception(exc, underlying_exc)
return
self._upgraded = upgraded
payload: StreamReader | None = None
for message, payload in messages:
if message.should_close:
self._should_close = True
self._payload = payload
if self._skip_payload or message.code in EMPTY_BODY_STATUS_CODES:
self.feed_data((message, EMPTY_PAYLOAD))
else:
self.feed_data((message, payload))
if payload is not None:
# new message(s) was processed
# register timeout handler unsubscribing
# either on end-of-stream or immediately for
# EMPTY_PAYLOAD
if payload is not EMPTY_PAYLOAD:
payload.on_eof(self._drop_timeout)
else:
self._drop_timeout()
if upgraded and tail:
self.data_received(tail)
|
ResponseHandler
|
python
|
google__jax
|
jax/_src/interpreters/ad.py
|
{
"start": 34957,
"end": 36942
}
|
class ____(Tracer):
__slots__ = ['primal', 'tangent']
def __init__(self, trace, primal, tangent):
if config.enable_checks.value:
_primal_tangent_shapes_match(primal, tangent)
self._trace = trace
self.primal = primal
self.tangent = tangent
def _short_repr(self):
return f"GradTracer<{self.aval}>"
@property
def aval(self):
return get_aval(self.primal)
def cur_qdd(self):
return core.cur_qdd(self.primal)
def full_lower(self):
if type(self.tangent) is Zero:
return core.full_lower(self.primal)
else:
return self
def to_concrete_value(self):
return core.to_concrete_value(self.primal)
def get_referent(self):
return core.get_referent(self.primal)
def type_state(self):
return self.primal.type_state()
def _primal_tangent_shapes_match(primal, tangent):
if type(tangent) is not Zero:
primal_aval = get_aval(primal).strip_weak_type()
tangent_aval = get_aval(tangent).strip_weak_type()
if not isinstance(primal_aval, core.ShapedArray):
return # TODO(mattjj,dougalm)
assert core.definitely_equal_shape(primal_aval.shape, tangent_aval.shape), (
primal_aval.shape, tangent_aval.shape)
expected_tangent_dtype = core.primal_dtype_to_tangent_dtype(primal_aval.dtype)
assert expected_tangent_dtype == tangent_aval.dtype, (
expected_tangent_dtype, tangent_aval.dtype)
if (not primal_aval.sharding.mesh.empty and
not tangent_aval.sharding.mesh.empty and
(primal_aval.sharding.mesh._any_axis_explicit or
tangent_aval.sharding.mesh._any_axis_explicit)):
assert primal_aval.sharding == tangent_aval.sharding, (
primal_aval.sharding, tangent_aval.sharding)
call_param_updaters: dict[core.Primitive, Callable] = {}
call_linearize_param_updaters: dict[core.Primitive, Callable] = {}
call_transpose_param_updaters: dict[core.Primitive, Callable] = {}
# -------------------- Linearize trace --------------------
|
JVPTracer
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_gradient09.py
|
{
"start": 315,
"end": 1439
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gradient09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56159616, 61364480]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_plotarea({"gradient": {"colors": ["#DDEBCF", "#9CB86E", "#156B13"]}})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
python-poetry__poetry
|
src/poetry/poetry.py
|
{
"start": 676,
"end": 2399
}
|
class ____(BasePoetry):
VERSION = __version__
def __init__(
self,
file: Path,
local_config: dict[str, Any],
package: ProjectPackage,
locker: Locker,
config: Config,
disable_cache: bool = False,
) -> None:
from poetry.repositories.repository_pool import RepositoryPool
super().__init__(file, local_config, package, pyproject_type=PyProjectTOML)
self._locker = locker
self._config = config
self._pool = RepositoryPool(config=config)
self._plugin_manager: PluginManager | None = None
self._disable_cache = disable_cache
@property
def pyproject(self) -> PyProjectTOML:
pyproject = super().pyproject
return cast("PyProjectTOML", pyproject)
@property
def file(self) -> TOMLFile:
return self.pyproject.file
@property
def locker(self) -> Locker:
return self._locker
@property
def pool(self) -> RepositoryPool:
return self._pool
@property
def config(self) -> Config:
return self._config
@property
def disable_cache(self) -> bool:
return self._disable_cache
def set_locker(self, locker: Locker) -> Poetry:
self._locker = locker
return self
def set_pool(self, pool: RepositoryPool) -> Poetry:
self._pool = pool
return self
def set_config(self, config: Config) -> Poetry:
self._config = config
return self
def get_sources(self) -> list[Source]:
return [
Source(**source)
for source in self.pyproject.data.get("tool", {})
.get("poetry", {})
.get("source", [])
]
|
Poetry
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1514857,
"end": 1515760
}
|
class ____(Transform):
"""
FlattenTransform schema wrapper.
Parameters
----------
flatten : Sequence[str, :class:`FieldName`]
An array of one or more data fields containing arrays to flatten. If multiple fields
are specified, their array values should have a parallel structure, ideally with the
same length. If the lengths of parallel arrays do not match, the longest array will
be used with ``null`` values added for missing entries.
as : Sequence[str, :class:`FieldName`]
The output field names for extracted array values.
**Default value:** The field name of the corresponding array field
"""
_schema = {"$ref": "#/definitions/FlattenTransform"}
def __init__(
self, flatten: Optional[Sequence[str | SchemaBase]] = Undefined, **kwds
):
super().__init__(flatten=flatten, **kwds)
|
FlattenTransform
|
python
|
pandas-dev__pandas
|
pandas/tests/arrays/categorical/test_analytics.py
|
{
"start": 283,
"end": 13378
}
|
class ____:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
ufunc = np.minimum if aggregation == "min" else np.maximum
with pytest.raises(TypeError, match=msg):
ufunc.reduce(cat)
def test_min_max_ordered(self, index_or_series_or_array):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
obj = index_or_series_or_array(cat)
_min = obj.min()
_max = obj.max()
assert _min == "a"
assert _max == "d"
assert np.minimum.reduce(obj) == "a"
assert np.maximum.reduce(obj) == "d"
# TODO: raises if we pass axis=0 (on Index and Categorical, not Series)
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
obj = index_or_series_or_array(cat)
_min = obj.min()
_max = obj.max()
assert _min == "d"
assert _max == "a"
assert np.minimum.reduce(obj) == "d"
assert np.maximum.reduce(obj) == "a"
def test_min_max_reduce(self):
# GH52788
cat = Categorical(["a", "b", "c", "d"], ordered=True)
df = DataFrame(cat)
result_max = df.agg("max")
expected_max = Series(Categorical(["d"], dtype=cat.dtype))
tm.assert_series_equal(result_max, expected_max)
result_min = df.agg("min")
expected_min = Series(Categorical(["a"], dtype=cat.dtype))
tm.assert_series_equal(result_min, expected_min)
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.nan),
([1, 2, 3], np.nan),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_numeric_only_min_max_raises(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with pytest.raises(TypeError, match=".* got an unexpected keyword"):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
cat = Categorical(values, categories=categories, ordered=True)
res = Series(cat).mode()._values
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(TypeError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(TypeError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
msg = (
"Cannot setitem on a Categorical with a new category, "
"set the categories first"
)
with pytest.raises(TypeError, match=msg):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(TypeError, match=msg):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self, ordered):
# GH38140
dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered)
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b", "c"], dtype=dtype)
res = cat.unique()
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], dtype=dtype)
res = cat.unique()
tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype))
cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype)
res = cat.unique()
exp_cat = Categorical(["c", "a", "b"], dtype=dtype)
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], dtype=dtype)
res = cat.unique()
exp_cat = Categorical(["b", np.nan, "a"], dtype=dtype)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self, ordered):
# GH38140
dtype = CategoricalDtype([3, 2, 1], ordered=ordered)
c = Categorical([3, 1, 2, 2, 1], dtype=dtype)
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], dtype=dtype)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], dtype=dtype)
exp = Categorical([1, 2], dtype=dtype)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(["a", "b", "c", "d", "a"])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, "a", "b", "c", "d"])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = Categorical(
["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"]
)
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self, using_infer_string):
cat = Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = Categorical(["foo", "foo", "bar"])
if using_infer_string:
if cat.categories.dtype.storage == "python":
assert cat.memory_usage(deep=True) > cat.nbytes
else:
assert cat.memory_usage(deep=True) >= cat.nbytes
else:
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_map(self):
c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True)
result = c.map(lambda x: x.lower(), na_action=None)
exp = Categorical(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_categorical_equal(result, exp)
c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False)
result = c.map(lambda x: x.lower(), na_action=None)
exp = Categorical(list("ababc"), categories=list("abc"), ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1, na_action=None)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_inplace_raises(self, value):
cat = Categorical(["A", "B", "B", "C", "A"])
msg = (
'For argument "inplace" expected type bool, '
f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
cat.sort_values(inplace=value)
def test_quantile_empty(self):
# make sure we have correct itemsize on resulting codes
cat = Categorical(["A", "B"])
idx = Index([0.0, 0.5])
result = cat[:0]._quantile(idx, interpolation="linear")
assert result._codes.dtype == np.int8
expected = cat.take([-1, -1], allow_fill=True)
tm.assert_extension_array_equal(result, expected)
|
TestCategoricalAnalytics
|
python
|
getsentry__sentry
|
tests/sentry/relocation/api/endpoints/test_unpause.py
|
{
"start": 596,
"end": 14315
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-relocations-unpause"
method = "put"
def setUp(self) -> None:
super().setUp()
self.owner = self.create_user(
email="owner", is_superuser=False, is_staff=True, is_active=True
)
self.superuser = self.create_user(is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.superuser.id,
owner_id=self.owner.id,
status=Relocation.Status.PAUSE.value,
step=Relocation.Step.PREPROCESSING.value,
provenance=Relocation.Provenance.SELF_HOSTED.value,
want_org_slugs=["foo"],
want_usernames=["alice", "bob"],
latest_notified=Relocation.EmailKind.STARTED.value,
latest_task=OrderedTask.PREPROCESSING_SCAN.name,
latest_task_attempts=1,
)
@override_options({"staff.ga-rollout": True})
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_good_staff_unpause_until_validating(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.staff_user, staff=True)
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.VALIDATING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.VALIDATING.name
assert async_task_scheduled.call_count == 1
assert async_task_scheduled.call_args.args == (str(self.relocation.uuid),)
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_good_unpause_until_validating(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.VALIDATING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.PREPROCESSING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.VALIDATING.name
assert async_task_scheduled.call_count == 1
assert async_task_scheduled.call_args.args == (str(self.relocation.uuid),)
@patch("sentry.relocation.tasks.process.validating_start.delay")
def test_good_unpause_until_importing(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.step = Relocation.Step.VALIDATING.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.IMPORTING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.VALIDATING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.IMPORTING.name
assert async_task_scheduled.call_count == 1
assert async_task_scheduled.call_args.args == (str(self.relocation.uuid),)
@patch("sentry.relocation.tasks.process.importing.delay")
def test_good_unpause_until_postprocessing(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.step = Relocation.Step.IMPORTING.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.POSTPROCESSING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.IMPORTING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.POSTPROCESSING.name
assert async_task_scheduled.call_count == 1
assert async_task_scheduled.call_args.args == (str(self.relocation.uuid),)
@patch("sentry.relocation.tasks.process.postprocessing.delay")
def test_good_unpause_until_notifying(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.step = Relocation.Step.POSTPROCESSING.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.NOTIFYING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.POSTPROCESSING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.NOTIFYING.name
assert async_task_scheduled.call_count == 1
assert async_task_scheduled.call_args.args == (str(self.relocation.uuid),)
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_good_change_pending_pause_later(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.IN_PROGRESS.value
self.relocation.step = Relocation.Step.VALIDATING.value
self.relocation.scheduled_pause_at_step = Relocation.Step.POSTPROCESSING.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.NOTIFYING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.VALIDATING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.NOTIFYING.name
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_good_change_pending_pause_sooner(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.IN_PROGRESS.value
self.relocation.step = Relocation.Step.VALIDATING.value
self.relocation.scheduled_pause_at_step = Relocation.Step.POSTPROCESSING.value
self.relocation.save()
response = self.get_success_response(
self.relocation.uuid, untilStep=Relocation.Step.IMPORTING.name, status_code=200
)
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.VALIDATING.name
assert response.data["scheduledPauseAtStep"] == Relocation.Step.IMPORTING.name
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_good_remove_pending_pause(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.IN_PROGRESS.value
self.relocation.step = Relocation.Step.VALIDATING.value
self.relocation.scheduled_pause_at_step = Relocation.Step.POSTPROCESSING.value
self.relocation.save()
response = self.get_success_response(self.relocation.uuid, status_code=200)
assert response.status_code == 200
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.VALIDATING.name
assert response.data["scheduledPauseAtStep"] is None
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.notifying_unhide.delay")
def test_good_unpause_no_follow_up_step(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.step = Relocation.Step.NOTIFYING.value
self.relocation.save()
response = self.get_success_response(self.relocation.uuid, status_code=200)
assert response.status_code == 200
assert response.data["status"] == Relocation.Status.IN_PROGRESS.name
assert response.data["step"] == Relocation.Step.NOTIFYING.name
assert not response.data["scheduledPauseAtStep"]
assert async_task_scheduled.call_count == 1
assert async_task_scheduled.call_args.args == (str(self.relocation.uuid),)
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_not_found(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
does_not_exist_uuid = uuid4().hex
self.get_error_response(does_not_exist_uuid, status_code=404)
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_already_completed(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.FAILURE.value
self.relocation.save()
response = self.get_error_response(self.relocation.uuid, status_code=400)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_NOT_UNPAUSABLE_STATUS.substitute(
status=Relocation.Status.FAILURE.name
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_already_paused(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
self.relocation.status = Relocation.Status.IN_PROGRESS.value
self.relocation.save()
response = self.get_error_response(self.relocation.uuid, status_code=400)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_NOT_UNPAUSABLE_STATUS.substitute(
status=Relocation.Status.IN_PROGRESS.name
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_invalid_step(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, untilStep="nonexistent", status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_UNKNOWN_RELOCATION_STEP.substitute(
step="nonexistent"
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_unknown_step(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, untilStep=Relocation.Step.UNKNOWN.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_PAUSE_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.UNKNOWN.name
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_current_step(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, untilStep=Relocation.Step.PREPROCESSING.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_PAUSE_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.PREPROCESSING.name
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_past_step(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, untilStep=Relocation.Step.UPLOADING.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_PAUSE_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.UPLOADING.name
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_last_step(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_error_response(
self.relocation.uuid, untilStep=Relocation.Step.COMPLETED.name, status_code=400
)
assert response.data.get("detail") is not None
assert response.data.get("detail") == ERR_COULD_NOT_PAUSE_RELOCATION_AT_STEP.substitute(
step=Relocation.Step.COMPLETED.name
)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_no_auth(self, async_task_scheduled: Mock) -> None:
self.get_error_response(self.relocation.uuid, status_code=401)
assert async_task_scheduled.call_count == 0
@patch("sentry.relocation.tasks.process.preprocessing_scan.delay")
def test_bad_no_superuser(self, async_task_scheduled: Mock) -> None:
self.login_as(user=self.superuser, superuser=False)
self.get_error_response(self.relocation.uuid, status_code=403)
assert async_task_scheduled.call_count == 0
|
UnpauseRelocationTest
|
python
|
doocs__leetcode
|
solution/2900-2999/2919.Minimum Increment Operations to Make Array Beautiful/Solution.py
|
{
"start": 0,
"end": 216
}
|
class ____:
def minIncrementOperations(self, nums: List[int], k: int) -> int:
f = g = h = 0
for x in nums:
f, g, h = g, h, min(f, g, h) + max(k - x, 0)
return min(f, g, h)
|
Solution
|
python
|
django__django
|
tests/generic_relations_regress/models.py
|
{
"start": 887,
"end": 1247
}
|
class ____(models.Model):
street = models.CharField(max_length=80)
city = models.CharField(max_length=50)
state = models.CharField(max_length=2)
zipcode = models.CharField(max_length=5)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
|
Address
|
python
|
lxml__lxml
|
src/lxml/html/__init__.py
|
{
"start": 54291,
"end": 56922
}
|
class ____(InputMixin, HtmlElement):
"""
Represents an ``<input>`` element.
You can get the type with ``.type`` (which is lower-cased and
defaults to ``'text'``).
Also you can get and set the value with ``.value``
Checkboxes and radios have the attribute ``input.checkable ==
True`` (for all others it is false) and a boolean attribute
``.checked``.
"""
## FIXME: I'm a little uncomfortable with the use of .checked
@property
def value(self):
"""
Get/set the value of this element, using the ``value`` attribute.
Also, if this is a checkbox and it has no value, this defaults
to ``'on'``. If it is a checkbox or radio that is not
checked, this returns None.
"""
if self.checkable:
if self.checked:
return self.get('value') or 'on'
else:
return None
return self.get('value')
@value.setter
def value(self, value):
if self.checkable:
if not value:
self.checked = False
else:
self.checked = True
if isinstance(value, str):
self.set('value', value)
else:
self.set('value', value)
@value.deleter
def value(self):
if self.checkable:
self.checked = False
else:
if 'value' in self.attrib:
del self.attrib['value']
@property
def type(self):
"""
Return the type of this element (using the type attribute).
"""
return self.get('type', 'text').lower()
@type.setter
def type(self, value):
self.set('type', value)
@property
def checkable(self):
"""
Boolean: can this element be checked?
"""
return self.type in ('checkbox', 'radio')
@property
def checked(self):
"""
Boolean attribute to get/set the presence of the ``checked``
attribute.
You can only use this on checkable input types.
"""
if not self.checkable:
raise AttributeError('Not a checkable input type')
return 'checked' in self.attrib
@checked.setter
def checked(self, value):
if not self.checkable:
raise AttributeError('Not a checkable input type')
if value:
self.set('checked', '')
else:
attrib = self.attrib
if 'checked' in attrib:
del attrib['checked']
HtmlElementClassLookup._default_element_classes['input'] = InputElement
|
InputElement
|
python
|
jina-ai__jina
|
jina/clients/base/websocket.py
|
{
"start": 693,
"end": 9839
}
|
class ____(BaseClient):
    """A Websocket Client."""
    async def _is_flow_ready(self, **kwargs) -> bool:
        """Sends a dry run to the Flow to validate if the Flow is ready to receive requests
        :param kwargs: kwargs coming from the public interface. Includes arguments to be passed to the `WebsocketClientlet`
        :return: boolean indicating the readiness of the Flow
        """
        async with AsyncExitStack() as stack:
            try:
                proto = 'wss' if self.args.tls else 'ws'
                url = f'{proto}://{self.args.host}:{self.args.port}/dry_run'
                iolet = await stack.enter_async_context(
                    WebsocketClientlet(
                        url=url,
                        logger=self.logger,
                        tracer_provider=self.tracer_provider,
                        **kwargs,
                    )
                )
                async def _receive():
                    # Only the first dry-run response matters; return it.
                    try:
                        async for response in iolet.recv_dry_run():
                            return response
                    except Exception as exc:
                        self.logger.error(
                            f'Error while fetching response from Websocket server {exc!r}'
                        )
                        raise
                async def _send():
                    return await iolet.send_dry_run()
                receive_task = asyncio.create_task(_receive())
                if receive_task.done():
                    raise RuntimeError(
                        'receive task not running, can not send messages'
                    )
                try:
                    send_task = asyncio.create_task(_send())
                    _, response_result = await asyncio.gather(send_task, receive_task)
                    if response_result.proto.code == jina_pb2.StatusProto.SUCCESS:
                        return True
                finally:
                    # Surface an abnormal server-side close as a ConnectionError.
                    if iolet.close_code == status.WS_1011_INTERNAL_ERROR:
                        raise ConnectionError(iolet.close_message)
                    await receive_task
            except Exception as e:
                self.logger.error(
                    f'Error while getting response from websocket server {e!r}'
                )
                return False
    async def _get_results(
        self,
        inputs: 'InputType',
        on_done: 'CallbackFnType',
        on_error: Optional['CallbackFnType'] = None,
        on_always: Optional['CallbackFnType'] = None,
        max_attempts: int = 1,
        initial_backoff: float = 0.5,
        max_backoff: float = 0.1,
        backoff_multiplier: float = 1.5,
        results_in_order: bool = False,
        prefetch: Optional[int] = None,
        **kwargs,
    ):
        """
        :param inputs: the callable
        :param on_done: the callback for on_done
        :param on_error: the callback for on_error
        :param on_always: the callback for on_always
        :param max_attempts: Number of sending attempts, including the original request.
        :param initial_backoff: The first retry will happen with a delay of random(0, initial_backoff)
        :param max_backoff: The maximum accepted backoff after the exponential incremental delay
        :param backoff_multiplier: The n-th attempt will occur at random(0, min(initialBackoff*backoffMultiplier**(n-1), maxBackoff))
        :param results_in_order: return the results in the same order as the inputs
        :param prefetch: How many Requests are processed from the Client at the same time.
        :param kwargs: kwargs coming from the public interface. Includes arguments to be passed to the `WebsocketClientlet`
        :yields: generator over results
        """
        # NOTE(review): default max_backoff (0.1) is *smaller* than
        # initial_backoff (0.5) — confirm this is intended and not swapped.
        # NOTE(review): this context manager originally guards an optional
        # dependency import; the body is empty here — confirm the websockets
        # requirement is enforced elsewhere.
        with ImportExtensions(required=True):
            pass
        request_iterator, inputs_length = self._get_requests(inputs=inputs, **kwargs)
        async with AsyncExitStack() as stack:
            cm1 = ProgressBar(
                total_length=inputs_length, disable=not (self.show_progress)
            )
            p_bar = stack.enter_context(cm1)
            proto = 'wss' if self.args.tls else 'ws'
            url = f'{proto}://{self.args.host}:{self.args.port}/'
            iolet = await stack.enter_async_context(
                WebsocketClientlet(
                    url=url,
                    logger=self.logger,
                    tracer_provider=self.tracer_provider,
                    max_attempts=max_attempts,
                    initial_backoff=initial_backoff,
                    max_backoff=max_backoff,
                    backoff_multiplier=backoff_multiplier,
                    **kwargs,
                )
            )
            request_buffer: Dict[str, asyncio.Future] = (
                dict()
            )  # maps request_ids to futures (tasks)
            def _result_handler(result):
                return result
            async def _receive():
                def _response_handler(response):
                    # Resolve the pending future for this request id, if any;
                    # otherwise the response is unsolicited and is dropped.
                    if response.header.request_id in request_buffer:
                        future = request_buffer.pop(response.header.request_id)
                        future.set_result(response)
                    else:
                        self.logger.warning(
                            f'discarding unexpected response with request id {response.header.request_id}'
                        )
                """Await messages from WebsocketGateway and process them in the request buffer"""
                try:
                    async for response in iolet.recv_message():
                        _response_handler(response)
                finally:
                    # Connection is gone: fail any requests still awaiting a reply.
                    if request_buffer:
                        self.logger.warning(
                            f'{self.__class__.__name__} closed, cancelling all outstanding requests'
                        )
                        for future in request_buffer.values():
                            future.cancel()
                        request_buffer.clear()
            def _handle_end_of_iter():
                """Send End of iteration signal to the Gateway"""
                asyncio.create_task(iolet.send_eoi())
            def _request_handler(
                request: 'Request', **kwargs
            ) -> 'Tuple[asyncio.Future, Optional[asyncio.Future]]':
                """
                For each request in the iterator, we send the `Message` using `iolet.send_message()`.
                For websocket requests from client, for each request in the iterator, we send the request in `bytes`
                using `iolet.send_message()`.
                Then add {<request-id>: <an-empty-future>} to the request buffer.
                This empty future is used to track the `result` of this request during `receive`.
                :param request: current request in the iterator
                :param kwargs: kwargs
                :return: asyncio Future for sending message
                """
                future = get_or_reuse_loop().create_future()
                request_buffer[request.header.request_id] = future
                asyncio.create_task(iolet.send_message(request))
                return future, None
            streamer_args = vars(self.args)
            if prefetch:
                streamer_args['prefetch'] = prefetch
            streamer = RequestStreamer(
                request_handler=_request_handler,
                result_handler=_result_handler,
                end_of_iter_handler=_handle_end_of_iter,
                logger=self.logger,
                **streamer_args,
            )
            receive_task = asyncio.create_task(_receive())
            exception_raised = None
            if receive_task.done():
                raise RuntimeError('receive task not running, can not send messages')
            try:
                async for response in streamer.stream(
                    request_iterator=request_iterator,
                    results_in_order=results_in_order,
                ):
                    callback_exec(
                        response=response,
                        logger=self.logger,
                        on_error=on_error,
                        on_done=on_done,
                        on_always=on_always,
                        continue_on_error=self.continue_on_error,
                    )
                    if self.show_progress:
                        p_bar.update()
                    yield response
            except Exception as ex:
                # Remember the streaming failure; re-raised after receive_task
                # is awaited in the finally block below.
                exception_raised = ex
                try:
                    receive_task.cancel()
                # NOTE(review): bare except — any failure from cancel() is
                # masked and the original exception is raised instead.
                except:
                    raise ex
            finally:
                if iolet.close_code == status.WS_1011_INTERNAL_ERROR:
                    raise ConnectionError(iolet.close_message)
                try:
                    await receive_task
                except asyncio.CancelledError:
                    # Prefer the original streaming error over the
                    # cancellation we triggered ourselves.
                    if exception_raised is not None:
                        raise exception_raised
                    else:
                        raise
|
WebSocketBaseClient
|
python
|
apache__airflow
|
providers/teradata/src/airflow/providers/teradata/hooks/teradata.py
|
{
"start": 3526,
"end": 11572
}
|
class ____(DbApiHook):
    """
    General hook for interacting with Teradata SQL Database.
    This module contains basic APIs to connect to and interact with Teradata SQL Database. It uses teradatasql
    client internally as a database driver for connecting to Teradata database. The config parameters like
    Teradata DB Server URL, username, password and database name are fetched from the predefined connection
    config connection_id. It raises an airflow error if the given connection id doesn't exist.
    You can also specify ssl parameters in the extra field of your connection
    as ``{"sslmode": "require", "sslcert": "/path/to/cert.pem", etc}``.
    .. seealso::
        - :ref:`Teradata API connection <howto/connection:teradata>`
    :param args: passed to DbApiHook
    :param database: The Teradata database to connect to.
    :param kwargs: passed to DbApiHook
    """
    # Override to provide the connection name.
    conn_name_attr = "teradata_conn_id"
    # Override to have a default connection id for a particular dbHook
    default_conn_name = "teradata_default"
    # Override if this db supports autocommit.
    supports_autocommit = True
    # Override if this db supports executemany.
    supports_executemany = True
    # Override this for hook to have a custom name in the UI selection
    conn_type = "teradata"
    # Override hook name to give descriptive name for hook
    hook_name = "Teradata"
    # Override with the Teradata specific placeholder parameter string used for insert queries
    placeholder: str = "?"
    # Override SQL query to be used for testing database connection
    _test_connection_sql = "select 1"
    def __init__(
        self,
        *args,
        database: str | None = None,
        **kwargs,
    ) -> None:
        # DbApiHook stores the database under ``schema``.
        super().__init__(*args, schema=database, **kwargs)
    def get_conn(self) -> TeradataConnection:
        """
        Create and return a Teradata Connection object using teradatasql client.
        Establishes connection to a Teradata SQL database using config corresponding to teradata_conn_id.
        :return: a Teradata connection object
        """
        teradata_conn_config: dict = self._get_conn_config_teradatasql()
        query_band_text = None
        # ``query_band`` is not a teradatasql connect() parameter — pop it
        # before connecting and apply it as a session setting afterwards.
        if "query_band" in teradata_conn_config:
            query_band_text = teradata_conn_config.pop("query_band")
        teradata_conn = teradatasql.connect(**teradata_conn_config)
        # setting query band
        self.set_query_band(query_band_text, teradata_conn)
        return teradata_conn
    def set_query_band(self, query_band_text, teradata_conn):
        """Set SESSION Query Band for each connection session."""
        try:
            query_band_text = _handle_user_query_band_text(query_band_text)
            # NOTE(review): query_band_text is interpolated into the SQL
            # string. The value comes from the Airflow connection config (not
            # end-user input), but verify it cannot contain a quote.
            set_query_band_sql = f"SET QUERY_BAND='{query_band_text}' FOR SESSION"
            with teradata_conn.cursor() as cur:
                cur.execute(set_query_band_sql)
        except Exception as ex:
            # Best-effort: a failed query band must not break the connection.
            self.log.error("Error occurred while setting session query band: %s ", str(ex))
    def _get_conn_config_teradatasql(self) -> dict[str, Any]:
        """Return set of config params required for connecting to Teradata DB using teradatasql client."""
        conn: Connection = self.get_connection(self.get_conn_id())
        conn_config = {
            "host": conn.host or "localhost",
            "dbs_port": conn.port or DEFAULT_DB_PORT,
            "database": conn.schema or "",
            "user": conn.login or "dbc",
            "password": conn.password or "dbc",
        }
        if conn.extra_dejson.get("tmode", False):
            conn_config["tmode"] = conn.extra_dejson["tmode"]
        # Handling SSL connection parameters
        if conn.extra_dejson.get("sslmode", False):
            conn_config["sslmode"] = conn.extra_dejson["sslmode"]
            # CA material is only relevant for the verify-ca/verify-full modes.
            if "verify" in str(conn_config["sslmode"]):
                if conn.extra_dejson.get("sslca", False):
                    conn_config["sslca"] = conn.extra_dejson["sslca"]
                if conn.extra_dejson.get("sslcapath", False):
                    conn_config["sslcapath"] = conn.extra_dejson["sslcapath"]
        if conn.extra_dejson.get("sslcipher", False):
            conn_config["sslcipher"] = conn.extra_dejson["sslcipher"]
        if conn.extra_dejson.get("sslcrc", False):
            conn_config["sslcrc"] = conn.extra_dejson["sslcrc"]
        if conn.extra_dejson.get("sslprotocol", False):
            conn_config["sslprotocol"] = conn.extra_dejson["sslprotocol"]
        if conn.extra_dejson.get("query_band", False):
            conn_config["query_band"] = conn.extra_dejson["query_band"]
        return conn_config
    @property
    def sqlalchemy_url(self) -> URL:
        """
        Override to return a Sqlalchemy.engine.URL object from the Teradata connection.
        :return: the extracted sqlalchemy.engine.URL object.
        """
        connection = self.get_connection(self.get_conn_id())
        # Adding only teradatasqlalchemy supported connection parameters.
        # https://pypi.org/project/teradatasqlalchemy/#ConnectionParameters
        return URL.create(
            drivername="teradatasql",
            username=connection.login,
            password=connection.password,
            host=connection.host,
            port=connection.port,
            database=connection.schema if connection.schema else None,
        )
    def get_uri(self) -> str:
        """Override DbApiHook get_uri method for get_sqlalchemy_engine()."""
        return self.sqlalchemy_url.render_as_string()
    @staticmethod
    def get_ui_field_behaviour() -> dict:
        """Return custom field behaviour."""
        import json
        return {
            "hidden_fields": ["port"],
            "relabeling": {
                "host": "Database Server URL",
                "schema": "Database Name",
                "login": "Username",
            },
            "placeholders": {
                "extra": json.dumps(
                    {"tmode": "TERA", "sslmode": "verify-ca", "sslca": "/tmp/server-ca.pem"}, indent=4
                ),
                "login": "dbc",
                "password": "dbc",
            },
        }
    def callproc(
        self,
        identifier: str,
        autocommit: bool = False,
        parameters: list | dict | None = None,
    ) -> list | dict | tuple | None:
        """
        Call the stored procedure identified by the provided string.
        Any OUT parameters must be provided with a value of either the
        expected Python type (e.g., `int`) or an instance of that type.
        :param identifier: stored procedure name
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :param parameters: The `IN`, `OUT` and `INOUT` parameters for Teradata
            stored procedure
        The return value is a list or mapping that includes parameters in
        both directions; the actual return type depends on the type of the
        provided `parameters` argument.
        """
        if parameters is None:
            parameters = []
        # One positional "?" placeholder per parameter.
        args = ",".join("?" for name in parameters)
        sql = f"{{CALL {identifier}({(args)})}}"
        def handler(cursor):
            # Normalize the cursor result into a plain list/dict copy.
            records = cursor.fetchall()
            if records is None:
                return
            if isinstance(records, list):
                return [row for row in records]
            if isinstance(records, dict):
                return {n: v for (n, v) in records.items()}
            raise TypeError(f"Unexpected results: {records}")
        result = self.run(
            sql,
            autocommit=autocommit,
            parameters=(
                [_map_param(value) for (name, value) in parameters.items()]
                if isinstance(parameters, dict)
                else [_map_param(value) for value in parameters]
            ),
            handler=handler,
        )
        return result
|
TeradataHook
|
python
|
huggingface__transformers
|
src/transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py
|
{
"start": 19718,
"end": 22832
}
|
class ____(HunYuanDenseV1PreTrainedModel, GenerationMixin):
    """Causal language model: HunYuan dense decoder backbone plus a linear
    vocabulary-projection head (``lm_head``)."""
    # lm_head shares its weight matrix with the input embedding table.
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    # Tensor-/pipeline-parallel sharding plans for the head.
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
    def __init__(self, config):
        super().__init__(config)
        self.model = HunYuanDenseV1Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:
        ```python
        >>> from transformers import AutoTokenizer, HunYuanDenseV1ForCausalLM
        >>> model = HunYuanDenseV1ForCausalLM.from_pretrained("meta-hunyuan_v1_dense/HunYuanDenseV1-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-hunyuan_v1_dense/HunYuanDenseV1-2-7b-hf")
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        # An int selects the trailing ``logits_to_keep`` positions; a tensor is
        # used directly as an index into the sequence dimension.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
HunYuanDenseV1ForCausalLM
|
python
|
walkccc__LeetCode
|
solutions/1854. Maximum Population Year/1854.py
|
{
"start": 0,
"end": 565
}
|
class ____:
def maximumPopulation(self, logs: list[list[int]]) -> int:
MIN_YEAR = 1950
MAX_YEAR = 2050
ans = 0
maxPopulation = 0
runningPopulation = 0
# population[i] := the population of year i
population = [0] * (MAX_YEAR + 1)
for birth, death in logs:
population[birth] += 1
population[death] -= 1
for year in range(MIN_YEAR, MAX_YEAR + 1):
runningPopulation += population[year]
if runningPopulation > maxPopulation:
maxPopulation = runningPopulation
ans = year
return ans
|
Solution
|
python
|
fluentpython__example-code
|
11-iface-abc/tombola.py
|
{
"start": 33,
"end": 845
}
|
class ____(abc.ABC): # <1>
@abc.abstractmethod
def load(self, iterable): # <2>
"""Add items from an iterable."""
@abc.abstractmethod
def pick(self): # <3>
"""Remove item at random, returning it.
This method should raise `LookupError` when the instance is empty.
"""
def loaded(self): # <4>
"""Return `True` if there's at least 1 item, `False` otherwise."""
return bool(self.inspect()) # <5>
def inspect(self):
"""Return a sorted tuple with the items currently inside."""
items = []
while True: # <6>
try:
items.append(self.pick())
except LookupError:
break
self.load(items) # <7>
return tuple(sorted(items))
# END TOMBOLA_ABC
|
Tombola
|
python
|
jazzband__django-polymorphic
|
example/orders/models.py
|
{
"start": 1385,
"end": 1652
}
|
class ____(Payment):
    """
    Payment by bank
    """
    # Name of the bank that processed the payment.
    bank_name = models.CharField(max_length=100)
    # SWIFT/BIC code of the bank (11 chars in practice; field allows 20 —
    # TODO confirm the intended width).
    swift = models.CharField(max_length=20)
    class Meta:
        verbose_name = _("Bank Payment")
        verbose_name_plural = _("Bank Payments")
|
BankPayment
|
python
|
huggingface__transformers
|
tests/models/blenderbot/test_modeling_blenderbot.py
|
{
"start": 20071,
"end": 21254
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Standalone (decoder-only) Blenderbot tests: exercises the decoder and
    the causal-LM head without the encoder half of the model."""
    all_model_classes = (BlenderbotDecoder, BlenderbotForCausalLM) if is_torch_available() else ()
    is_encoder_decoder = False
    def setUp(
        self,
    ):
        self.model_tester = BlenderbotStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
    @unittest.skip(reason="decoder cannot keep gradients")
    def test_retain_grad_hidden_states_attentions(self):
        return
    @unittest.skip(reason="Decoder cannot keep gradients")
    def test_flex_attention_with_grads(self):
        # Fix: the method was missing ``self``. unittest invokes test methods
        # on the instance, so the old signature would raise TypeError the
        # moment the skip decorator is lifted.
        return
|
BlenderbotStandaloneDecoderModelTest
|
python
|
numba__numba
|
numba/tests/test_generators.py
|
{
"start": 2290,
"end": 9257
}
|
class ____(MemoryLeakMixin, TestCase):
    """Checks Numba-compiled generators against their pure-Python originals,
    in both nopython and object (forceobj) compilation modes."""
    def check_generator(self, pygen, cgen):
        # Compiled generator must match the Python one item-for-item, and an
        # exhausted generator must keep raising StopIteration.
        self.assertEqual(next(cgen), next(pygen))
        # Use list comprehensions to make sure we trash the generator's
        # former C stack.
        expected = [x for x in pygen]
        got = [x for x in cgen]
        self.assertEqual(expected, got)
        with self.assertRaises(StopIteration):
            next(cgen)
    def check_gen1(self, **kwargs):
        pyfunc = gen1
        cr = jit((types.int32,), **kwargs)(pyfunc)
        pygen = pyfunc(8)
        cgen = cr(8)
        self.check_generator(pygen, cgen)
    def test_gen1(self):
        self.check_gen1(**nopython_flags)
    def test_gen1_objmode(self):
        self.check_gen1(**forceobj_flags)
    def check_gen2(self, **kwargs):
        pyfunc = gen2
        cr = jit((types.int32,), **kwargs)(pyfunc)
        pygen = pyfunc(8)
        cgen = cr(8)
        self.check_generator(pygen, cgen)
    def test_gen2(self):
        self.check_gen2(**nopython_flags)
    def test_gen2_objmode(self):
        self.check_gen2(**forceobj_flags)
    def check_gen3(self, **kwargs):
        pyfunc = gen3
        cr = jit((types.int32,), **kwargs)(pyfunc)
        pygen = pyfunc(8)
        cgen = cr(8)
        self.check_generator(pygen, cgen)
    def test_gen3(self):
        self.check_gen3(**nopython_flags)
    def test_gen3_objmode(self):
        self.check_gen3(**forceobj_flags)
    def check_gen4(self, **kwargs):
        pyfunc = gen4
        cr = jit((types.int32,) * 3, **kwargs)(pyfunc)
        pygen = pyfunc(5, 6, 7)
        cgen = cr(5, 6, 7)
        self.check_generator(pygen, cgen)
    def test_gen4(self):
        self.check_gen4(**nopython_flags)
    def test_gen4_objmode(self):
        self.check_gen4(**forceobj_flags)
    def test_gen5(self):
        # gen5 never yields: nopython mode must reject it at typing time.
        with self.assertTypingError() as raises:
            jit((), **nopython_flags)(gen5)
        self.assertIn("Cannot type generator: it does not yield any value",
                      str(raises.exception))
    def test_gen5_objmode(self):
        # Object mode tolerates a yield-less generator: it is simply empty.
        cgen = jit((), **forceobj_flags)(gen5)()
        self.assertEqual(list(cgen), [])
        with self.assertRaises(StopIteration):
            next(cgen)
    def check_gen6(self, **kwargs):
        cr = jit((types.int32,) * 2, **kwargs)(gen6)
        cgen = cr(5, 6)
        l = []
        for i in range(3):
            l.append(next(cgen))
        self.assertEqual(l, [14] * 3)
    def test_gen6(self):
        self.check_gen6(**nopython_flags)
    def test_gen6_objmode(self):
        self.check_gen6(**forceobj_flags)
    def check_gen7(self, **kwargs):
        pyfunc = gen7
        cr = jit((types.Array(types.float64, 1, 'C'),), **kwargs)(pyfunc)
        arr = np.linspace(1, 10, 7)
        # Copy for the Python run so the compiled run sees pristine data.
        pygen = pyfunc(arr.copy())
        cgen = cr(arr)
        self.check_generator(pygen, cgen)
    def test_gen7(self):
        self.check_gen7(**nopython_flags)
    def test_gen7_objmode(self):
        self.check_gen7(**forceobj_flags)
    def check_gen8(self, **jit_args):
        # gen8 takes defaults/keyword args: exercise several call shapes.
        pyfunc = gen8
        cfunc = jit(**jit_args)(pyfunc)
        def check(*args, **kwargs):
            self.check_generator(pyfunc(*args, **kwargs),
                                 cfunc(*args, **kwargs))
        check(2, 3)
        check(4)
        check(y=5)
        check(x=6, b=True)
    def test_gen8(self):
        self.check_gen8(nopython=True)
    def test_gen8_objmode(self):
        self.check_gen8(forceobj=True)
    def check_gen9(self, **kwargs):
        pyfunc = gen_bool
        cr = jit((), **kwargs)(pyfunc)
        pygen = pyfunc()
        cgen = cr()
        self.check_generator(pygen, cgen)
    def test_gen9(self):
        self.check_gen9(**nopython_flags)
    def test_gen9_objmode(self):
        self.check_gen9(**forceobj_flags)
    def check_consume_generator(self, gen_func):
        # A compiled function consuming a compiled generator must agree with
        # the all-Python pairing.
        cgen = jit(nopython=True)(gen_func)
        cfunc = jit(nopython=True)(make_consumer(cgen))
        pyfunc = make_consumer(gen_func)
        expected = pyfunc(5)
        got = cfunc(5)
        self.assertPreciseEqual(got, expected)
    def test_consume_gen1(self):
        self.check_consume_generator(gen1)
    def test_consume_gen2(self):
        self.check_consume_generator(gen2)
    def test_consume_gen3(self):
        self.check_consume_generator(gen3)
    # Check generator storage of some types
    def check_ndindex(self, **kwargs):
        pyfunc = gen_ndindex
        cr = jit((types.UniTuple(types.intp, 2),), **kwargs)(pyfunc)
        shape = (2, 3)
        pygen = pyfunc(shape)
        cgen = cr(shape)
        self.check_generator(pygen, cgen)
    def test_ndindex(self):
        self.check_ndindex(**nopython_flags)
    def test_ndindex_objmode(self):
        self.check_ndindex(**forceobj_flags)
    def check_np_flat(self, pyfunc, **kwargs):
        # C-contiguous layout first, then the any-layout ('A') transpose.
        cr = jit((types.Array(types.int32, 2, "C"),), **kwargs)(pyfunc)
        arr = np.arange(6, dtype=np.int32).reshape((2, 3))
        self.check_generator(pyfunc(arr), cr(arr))
        crA = jit((types.Array(types.int32, 2, "A"),), **kwargs)(pyfunc)
        arr = arr.T
        self.check_generator(pyfunc(arr), crA(arr))
    def test_np_flat(self):
        self.check_np_flat(gen_flat, **nopython_flags)
    def test_np_flat_objmode(self):
        self.check_np_flat(gen_flat, **forceobj_flags)
    def test_ndenumerate(self):
        self.check_np_flat(gen_ndenumerate, **nopython_flags)
    def test_ndenumerate_objmode(self):
        self.check_np_flat(gen_ndenumerate, **forceobj_flags)
    def test_type_unification_error(self):
        pyfunc = gen_unification_error
        with self.assertTypingError() as raises:
            jit((), **nopython_flags)(pyfunc)
        msg = ("Can't unify yield type from the following types: complex128, "
               "none")
        self.assertIn(msg, str(raises.exception))
    def test_optional_expansion_type_unification_error(self):
        pyfunc = gen_optional_and_type_unification_error
        with self.assertTypingError() as raises:
            jit((), **nopython_flags)(pyfunc)
        msg = ("Can't unify yield type from the following types: complex128, "
               "int%s, none")
        self.assertIn(msg % types.intp.bitwidth, str(raises.exception))
    def test_changing_tuple_type(self):
        # test https://github.com/numba/numba/issues/7295
        pyfunc = gen_changing_tuple_type
        expected = list(pyfunc())
        got = list(njit(pyfunc)())
        self.assertEqual(expected, got)
    def test_changing_number_type(self):
        # additional test for https://github.com/numba/numba/issues/7295
        pyfunc = gen_changing_number_type
        expected = list(pyfunc())
        got = list(njit(pyfunc)())
        self.assertEqual(expected, got)
def nrt_gen0(ary):
    """Yield every element of *ary* in order (identity generator)."""
    yield from ary
def nrt_gen1(ary1, ary2):
    """Interleave *ary1* and *ary2* pairwise, stopping at the shorter one."""
    for pair in zip(ary1, ary2):
        yield from pair
TestGenerators
|
python
|
sympy__sympy
|
sympy/sets/sets.py
|
{
"start": 64236,
"end": 65807
}
|
class ____(Set):
    """Represents the set of elements which are in either of the
    sets and not in their intersection.

    Examples
    ========

    >>> from sympy import SymmetricDifference, FiniteSet
    >>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
    {1, 2, 4, 5}

    See Also
    ========

    Complement, Union

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Symmetric_difference
    """
    is_SymmetricDifference = True
    def __new__(cls, a, b, evaluate=True):
        # With evaluate=True try to simplify right away; otherwise build the
        # unevaluated node.
        if evaluate:
            return SymmetricDifference.reduce(a, b)
        return Basic.__new__(cls, a, b)
    @staticmethod
    def reduce(A, B):
        # Delegate simplification to the operand; fall back to an
        # unevaluated node when none is known.
        result = B._symmetric_difference(A)
        if result is not None:
            return result
        else:
            return SymmetricDifference(A, B, evaluate=False)
    def as_relational(self, symbol):
        """Rewrite a symmetric_difference in terms of equalities and
        logic operators"""
        A, B = self.args
        A_rel = A.as_relational(symbol)
        B_rel = B.as_relational(symbol)
        # Membership in exactly one of A, B is the Xor of the memberships.
        return Xor(A_rel, B_rel)
    @property
    def is_iterable(self):
        # NOTE(review): yields True or (implicitly) None, never False —
        # matching SymPy's three-valued property convention.
        if all(arg.is_iterable for arg in self.args):
            return True
    def __iter__(self):
        args = self.args
        union = roundrobin(*(iter(arg) for arg in args))
        for item in union:
            count = 0
            for s in args:
                if item in s:
                    count += 1
            # Keep items contained in an odd number of the argument sets.
            if count % 2 == 1:
                yield item
|
SymmetricDifference
|
python
|
ray-project__ray
|
python/ray/_private/log.py
|
{
"start": 930,
"end": 4780
}
|
class ____(logging.StreamHandler):
    """A plain log handler.
    This handler writes to whatever sys.stderr points to at emit-time,
    not at instantiation time. See docs for logging._StderrHandler.
    """
    def __init__(self):
        super().__init__()
        # Message-only fallback handler used inside worker processes.
        self.plain_handler = logging._StderrHandler()
        self.plain_handler.level = self.level
        self.plain_handler.formatter = logging.Formatter(fmt="%(message)s")
    def emit(self, record: logging.LogRecord):
        """Emit the log message.
        If this is a worker, bypass fancy logging and just emit the log record.
        If this is the driver, emit the message using the appropriate console handler.
        Args:
            record: Log record to be emitted
        """
        # Imported at emit-time, not module load — presumably to avoid a
        # circular import with the ray package; confirm before changing.
        import ray
        if (
            hasattr(ray, "_private")
            and hasattr(ray._private, "worker")
            and ray._private.worker.global_worker.mode
            == ray._private.worker.WORKER_MODE
        ):
            self.plain_handler.emit(record)
        else:
            # Driver path: delegate to _StderrHandler.emit so the record goes
            # to whatever sys.stderr is at this moment.
            logging._StderrHandler.emit(self, record)
# One-time initialization guard and its lock; see generate_logging_config().
logger_initialized = False
logging_config_lock = threading.Lock()
def _setup_log_record_factory():
    """Install a LogRecord factory that stamps each record with a
    nanosecond-precision timestamp under INTERNAL_TIMESTAMP_LOG_KEY."""
    previous_factory = logging.getLogRecordFactory()
    def _stamped_factory(*args, **kwargs):
        rec = previous_factory(*args, **kwargs)
        # Python logging module starts to use `time.time_ns()` to generate
        # `created` from Python 3.13 to avoid the precision loss caused by
        # the float type. Here, we generate the `created` for the LogRecord
        # to support older Python versions.
        now_ns = time.time_ns()
        rec.created = now_ns / 1e9
        rec.__dict__[INTERNAL_TIMESTAMP_LOG_KEY] = now_ns
        return rec
    logging.setLogRecordFactory(_stamped_factory)
def generate_logging_config():
    """Install Ray's default logging configuration (idempotent, thread-safe)."""
    global logger_initialized
    with logging_config_lock:
        # Only the first caller performs the setup.
        if logger_initialized:
            return
        logger_initialized = True
        line_format = logging.Formatter(
            "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
        )
        handler = PlainRayHandler()
        handler.setFormatter(line_format)
        ray_logger = logging.getLogger("ray")
        ray_logger.setLevel(logging.INFO)
        ray_logger.addHandler(handler)
        ray_logger.propagate = False
        # Special handling for ray.rllib: only warning-level messages passed through
        # See https://github.com/ray-project/ray/pull/31858 for related PR
        logging.getLogger("ray.rllib").setLevel(logging.WARN)
        # Install the timestamping LogRecord factory.
        _setup_log_record_factory()
def setup_process_exit_logger(
    process_exit_log_path: str,
    level: int = logging.INFO,
    formatter: Optional[logging.Formatter] = None,
) -> logging.Logger:
    """Configure and return the 'ray.process_exit' logger with a FileHandler.

    Args:
        process_exit_log_path: Destination file for exit records.
        level: Logging level applied to the logger.
        formatter: Optional formatter; defaults to the standard Ray line format.
    """
    exit_logger = logging.getLogger("ray.process_exit")
    exit_logger.setLevel(level)
    exit_logger.propagate = False
    file_handler = logging.FileHandler(process_exit_log_path, encoding="utf-8")
    # Fall back to the standard Ray log line format when none is supplied.
    file_handler.setFormatter(
        formatter
        if formatter is not None
        else logging.Formatter(
            "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
        )
    )
    exit_logger.addHandler(file_handler)
    return exit_logger
def format_returncode(rc: Optional[int]) -> str:
    """Return a consistent string for process return code."""
    if rc is None:
        return "None"
    try:
        code = int(rc)
    except Exception:
        # Not coercible to an int; fall back to its plain string form.
        return str(rc)
    # Negative return codes (POSIX convention) mean "killed by signal -rc".
    return f"{code} (signal {-code})" if code < 0 else f"{code}"
|
PlainRayHandler
|
python
|
aimacode__aima-python
|
csp.py
|
{
"start": 23830,
"end": 28186
}
|
class ____(CSP):
    """
    Make a CSP for the nQueens problem for search with min_conflicts.
    Suitable for large n, it uses only data structures of size O(n).
    Think of placing queens one per column, from left to right.
    That means position (x, y) represents (var, val) in the CSP.
    The main structures are three arrays to count queens that could conflict:
        rows[i]   Number of queens in the ith row (i.e. val == i)
        downs[i]  Number of queens in the \\ diagonal
                  such that their (x, y) coordinates sum to i
        ups[i]    Number of queens in the / diagonal
                  such that their (x, y) coordinates have x-y+n-1 = i
    We increment/decrement these counts each time a queen is placed/moved from
    a row/diagonal. So moving is O(1), as is nconflicts. But choosing
    a variable, and a best value for the variable, are each O(n).
    If you want, you can keep track of conflicted variables, then variable
    selection will also be O(1).
    >>> len(backtracking_search(NQueensCSP(8)))
    8
    """
    def __init__(self, n):
        """Initialize data structures for n Queens."""
        CSP.__init__(self, list(range(n)), UniversalDict(list(range(n))),
                     UniversalDict(list(range(n))), queen_constraint)
        # n rows; 2n-1 diagonals in each direction.
        self.rows = [0] * n
        self.ups = [0] * (2 * n - 1)
        self.downs = [0] * (2 * n - 1)
    def nconflicts(self, var, val, assignment):
        """The number of conflicts, as recorded with each assignment.
        Count conflicts in row and in up, down diagonals. If there
        is a queen there, it can't conflict with itself, so subtract 3."""
        n = len(self.variables)
        c = self.rows[val] + self.downs[var + val] + self.ups[var - val + n - 1]
        # The queen at (var, val) was counted once in each of the three
        # arrays; remove that self-count.
        if assignment.get(var, None) == val:
            c -= 3
        return c
    def assign(self, var, val, assignment):
        """Assign var, and keep track of conflicts."""
        old_val = assignment.get(var, None)
        if val != old_val:
            if old_val is not None:  # Remove old val if there was one
                self.record_conflict(assignment, var, old_val, -1)
            self.record_conflict(assignment, var, val, +1)
            CSP.assign(self, var, val, assignment)
    def unassign(self, var, assignment):
        """Remove var from assignment (if it is there) and track conflicts."""
        if var in assignment:
            self.record_conflict(assignment, var, assignment[var], -1)
        CSP.unassign(self, var, assignment)
    def record_conflict(self, assignment, var, val, delta):
        """Record conflicts caused by addition or deletion of a Queen."""
        n = len(self.variables)
        self.rows[val] += delta
        self.downs[var + val] += delta
        self.ups[var - val + n - 1] += delta
    def display(self, assignment):
        """Print the queens and the nconflicts values (for debugging)."""
        n = len(self.variables)
        for val in range(n):
            # Left half: the board itself ('Q' for a queen, checkerboard dots).
            for var in range(n):
                if assignment.get(var, '') == val:
                    ch = 'Q'
                elif (var + val) % 2 == 0:
                    ch = '.'
                else:
                    ch = '-'
                print(ch, end=' ')
            print('    ', end=' ')
            # Right half: per-square conflict counts ('*' marks the queen).
            for var in range(n):
                if assignment.get(var, '') == val:
                    ch = '*'
                else:
                    ch = ' '
                print(str(self.nconflicts(var, val, assignment)) + ch, end=' ')
            print()
# ______________________________________________________________________________
# Sudoku
def flatten(seqs):
    """Concatenate a sequence of lists into one flat list."""
    merged = []
    for seq in seqs:
        # Plain list concatenation, matching sum(seqs, []) semantics.
        merged = merged + seq
    return merged
# Sample puzzles: 81-char strings, '.' for an empty cell, row-major order.
easy1 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
harder1 = '4173698.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
_R3 = list(range(3))
# Each call yields the next sequential cell id (0..80).
_CELL = itertools.count().__next__
# 4-level nesting: block-row / block-col / row-in-block / col-in-block.
_BGRID = [[[[_CELL() for x in _R3] for y in _R3] for bx in _R3] for by in _R3]
_BOXES = flatten([list(map(flatten, brow)) for brow in _BGRID])
_ROWS = flatten([list(map(flatten, zip(*brow))) for brow in _BGRID])
_COLS = list(zip(*_ROWS))
# Map each cell to the set of cells sharing its box, row, or column.
_NEIGHBORS = {v: set() for v in flatten(_ROWS)}
for unit in map(set, _BOXES + _ROWS + _COLS):
    for v in unit:
        _NEIGHBORS[v].update(unit - {v})
|
NQueensCSP
|
python
|
django__django
|
django/test/utils.py
|
{
"start": 20458,
"end": 24100
}
|
class ____(TestContextDecorator):
    """
    Act as a decorator. Override list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
    """
    def __init__(self, new_checks, deployment_checks=None):
        from django.core.checks.registry import registry
        self.registry = registry
        self.new_checks = new_checks
        self.deployment_checks = deployment_checks
        super().__init__()
    def enable(self):
        # Save the current registry contents so disable() can restore them.
        self.old_checks = self.registry.registered_checks
        self.registry.registered_checks = set()
        for check in self.new_checks:
            self.registry.register(check, *getattr(check, "tags", ()))
        self.old_deployment_checks = self.registry.deployment_checks
        # Deployment checks are only replaced when explicitly provided.
        if self.deployment_checks is not None:
            self.registry.deployment_checks = set()
            for check in self.deployment_checks:
                self.registry.register(check, *getattr(check, "tags", ()), deploy=True)
    def disable(self):
        # Restore the registry exactly as it was before enable().
        self.registry.registered_checks = self.old_checks
        self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""
Try to do a 'xml-comparison' of want and got. Plain string comparison
doesn't always work because, for example, attribute ordering should not be
important. Ignore comment nodes, processing instructions, document type
node, and leading and trailing whitespaces.
Based on
https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r"[ \t\n][ \t\n]+")
def norm_whitespace(v):
return _norm_whitespace_re.sub(" ", v)
def child_text(element):
return "".join(
c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE
)
def children(element):
return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
return all(
check_element(want, got) for want, got in zip(want_children, got_children)
)
def first_node(document):
for node in document.childNodes:
if node.nodeType not in (
Node.COMMENT_NODE,
Node.DOCUMENT_TYPE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
):
return node
want = want.strip().replace("\\n", "\n")
got = got.strip().replace("\\n", "\n")
# If the string is not a complete xml document, we may need to add a
# root element. This allow us to compare fragments, like "<foo/><bar/>"
if not want.startswith("<?xml"):
wrapper = "<root>%s</root>"
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
|
override_system_checks
|
python
|
realpython__materials
|
python-314/tstrings.py
|
{
"start": 147,
"end": 2252
}
|
class ____:
statement: str
params: list[Any]
def __init__(self, template: Template) -> None:
items, params = [], []
for item in template:
match item:
case str():
items.append(item)
case Interpolation(value, _, conversion, format_spec):
converted = convert(value, conversion)
if format_spec:
converted = format(converted, format_spec)
params.append(converted)
items.append("?")
super().__setattr__("statement", "".join(items))
super().__setattr__("params", params)
def find_users_query_v1(name: str) -> str:
"""Return a SQL query to find users by name."""
return f"SELECT * FROM users WHERE name = '{name}'"
def find_users_query_v2(name: str) -> Template:
"""Return a SQL query to find users by name."""
return t"SELECT * FROM users WHERE name = '{name}'"
def find_users(name: str) -> SQLQuery:
"""Return a SQL query to find users by name."""
return SQLQuery(t"SELECT * FROM users WHERE name = {name}")
def render(template: Template) -> str:
return "".join(
f"{text}{value}"
for text, value in zip(template.strings, template.values, strict=False)
)
def safer_render(template: Template) -> str:
items = []
for item in template:
if isinstance(item, str):
items.append(item)
else:
sanitized = str(item.value).replace("'", "''")
items.append(sanitized)
return "".join(items)
if __name__ == "__main__":
# Insecure f-strings
print(find_users_query_v1("' OR '1'='1"))
# More secure t-strings
print(find_users_query_v2("' OR '1'='1"))
# Insecure way of rendering t-strings into plain strings
print(render(find_users_query_v2("' OR '1'='1")))
# More secure way of rendering t-strings
print(safer_render(find_users_query_v2("' OR '1'='1")))
# Rendering t-strings into an alternative representation
print(find_users("' OR '1'='1"))
|
SQLQuery
|
python
|
apache__airflow
|
airflow-core/src/airflow/timetables/datasets.py
|
{
"start": 966,
"end": 1451
}
|
class ____:
"""Deprecated alias for `AssetOrTimeSchedule`."""
def __new__(cls, *, timetable, datasets) -> AssetOrTimeSchedule: # type: ignore[misc]
warnings.warn(
"DatasetOrTimeSchedule is deprecated and will be removed in Airflow 3.2. Use `airflow.timetables.AssetOrTimeSchedule` instead.",
DeprecatedImportWarning,
stacklevel=2,
)
return AssetOrTimeSchedule(timetable=timetable, assets=datasets)
|
DatasetOrTimeSchedule
|
python
|
python__mypy
|
mypyc/test/test_run.py
|
{
"start": 15619,
"end": 16379
}
|
class ____(TestRun):
"""Run the main multi-module tests in separate compilation mode.
In this mode there are multiple compilation groups, which are compiled
incrementally. Each group is compiled to a separate C file, and these C
files are compiled separately.
Each compiled module is placed into a separate compilation group, unless
overridden by a special comment. Consider this example:
# separate: [(["other.py", "other_b.py"], "stuff")]
This puts other.py and other_b.py into a compilation group named "stuff".
Any files not mentioned in the comment will get single-file groups.
"""
separate = True
test_name_suffix = "_separate"
files = ["run-multimodule.test", "run-mypy-sim.test"]
|
TestRunSeparate
|
python
|
openai__openai-python
|
src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py
|
{
"start": 662,
"end": 848
}
|
class ____(BaseModel):
data: List[Data]
has_more: bool
object: Literal["list"]
first_id: Optional[str] = None
last_id: Optional[str] = None
|
PermissionRetrieveResponse
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py
|
{
"start": 3810,
"end": 3950
}
|
class ____:
config_schema: dict[str, Any] | None = None
data_schema: dict[str, Any] | None = None
@dataclasses.dataclass
|
ActionSchemas
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_multiarray.py
|
{
"start": 4274,
"end": 9599
}
|
class ____(TestCase):
def setUp(self):
self.a = np.arange(10)
@xfail
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, "self.a[0] = 3", mydict)
assert_raises(ValueError, runstring, "self.a[0:1].itemset(3)", mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_any_base(self):
# Ensure that any base being writeable is sufficient to change flag;
# this is especially interesting for arrays from an array interface.
arr = np.arange(10)
class subclass(np.ndarray):
pass
# Create subclass so base will not be collapsed, this is OK to change
view1 = arr.view(subclass)
view2 = view1[...]
arr.flags.writeable = False
view2.flags.writeable = False
view2.flags.writeable = True # Can be set to True again.
arr = np.arange(10)
class frominterface:
def __init__(self, arr):
self.arr = arr
self.__array_interface__ = arr.__array_interface__
view1 = np.asarray(frominterface)
view2 = view1[...]
view2.flags.writeable = False
view2.flags.writeable = True
view1.flags.writeable = False
view2.flags.writeable = False
with assert_raises(ValueError):
# Must assume not writeable, since only base is not:
view2.flags.writeable = True
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b"\x00" * 100
vals = np.frombuffer(data, "B")
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype([("vals", "u1"), ("res3", "S4")])
values = np.core.records.fromstring(data, types)
vals = values["vals"]
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b"\x00" * 100)
vals = np.frombuffer(data, "B")
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype([("vals", "u1"), ("res3", "S4")])
values = np.core.records.fromstring(data, types)
vals = values["vals"]
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@skipif(IS_PYPY, reason="PyPy always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_warnonwrite(self):
a = np.arange(10)
a.flags._warn_on_write = True
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
a[1] = 10
a[2] = 10
# only warn once
assert_(len(w) == 1)
@parametrize(
"flag, flag_value, writeable",
[
("writeable", True, True),
# Delete _warn_on_write after deprecation and simplify
# the parameterization:
("_warn_on_write", True, False),
("writeable", False, False),
],
)
def test_readonly_flag_protocols(self, flag, flag_value, writeable):
a = np.arange(10)
setattr(a.flags, flag, flag_value)
class MyArr:
__array_struct__ = a.__array_struct__
assert memoryview(a).readonly is not writeable
assert a.__array_interface__["data"][1] is not writeable
assert np.asarray(MyArr()).flags.writeable is writeable
@xfail
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags["C"], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags["X"], False)
assert_equal(self.a.flags["WRITEBACKIFCOPY"], False)
@xfail # invalid dtype
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype("|S4"))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype("|S4"))
assert_(a.flags.aligned)
@xfail # structured dtypes
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
@xpassIfTorchDynamo_np # (reason="TODO: hash")
|
TestFlag
|
python
|
pytorch__pytorch
|
test/jit/test_graph_rewrite_passes.py
|
{
"start": 225,
"end": 2373
}
|
class ____(JitTestCase):
def test_fuse_linear(self):
class FunctionalLinear(torch.nn.Module):
def __init__(self, weight, bias):
super().__init__()
self.weight = weight
self.bias = bias
def forward(self, x):
res = torch.matmul(x, self.weight.t())
if self.bias is not None:
res.add_(self.bias)
return res
x1 = torch.rand(3)
w1 = torch.rand(5, 3)
b1 = torch.rand(5)
for has_bias in [True, False]:
bias = b1 if has_bias else None
model = torch.jit.trace(FunctionalLinear(w1, bias), [x1])
for node in model.graph.nodes():
if node.kind() == "aten::matmul":
source_range_1 = node.sourceRange()
torch._C._jit_pass_fuse_linear(model.graph)
for node in model.graph.nodes():
if node.kind() == "aten::linear":
source_range_2 = node.sourceRange()
FileCheck().check("aten::linear").run(model.graph)
check_not = ["aten::matmul", "aten::addmm", "aten::add_", "aten::t("]
for cn in check_not:
FileCheck().check_not(cn).run(model.graph)
self.assertTrue(source_range_1 == source_range_2)
# make sure it runs
model(x1)
# check matmuls are not fused
class Matmul(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return torch.matmul(x, self.weight)
x = torch.rand(5, 6, 5)
w = torch.rand(5, 5, 100)
model = torch.jit.trace(Matmul(w), [x])
torch._C._jit_pass_fuse_linear(model.graph)
# check 3d matmul is not fused
FileCheck().check("aten::matmul").run(model.graph)
FileCheck().check_not("aten::linear").run(model.graph)
# make sure it runs
model(x)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
TestGraphRewritePasses
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/viewset.py
|
{
"start": 11687,
"end": 14963
}
|
class ____(BulkActionsViewsMixin, Application):
"""
Viewset includes multiples flow with common Inbox/Queue/Archive views as an
separate App into Site.
`Life demo
<https://demo.viewflow.io/workflow/inbox/>`_
Usage:
.. code-block:: python
site = Site(
viewsets=[
WorkflowAppViewset(
flow_viewsets=[
FlowViewset(HelloWorldFlow, icon="assignment"),
FlowViewset(ShipmentFlow, icon="local_shipping"),
FlowViewset(DynamicSplitFlow, icon="tune"),
]
]
)
urlpatterns = [path("", site.urls)]
"""
app_name = "workflow"
icon = "assignment"
menu_template_name = "viewflow/workflow/workflow_menu.html"
base_template_name = "viewflow/workflow/base_page.html"
def __init__(self, flow_viewsets, **kwargs):
self.flow_classes = [viewset._flow_class for viewset in flow_viewsets]
viewsets = kwargs.get("viewsets", [])
viewsets.append(NestedFlowsApp(viewsets=flow_viewsets))
kwargs["viewsets"] = viewsets
super().__init__(**kwargs)
def _get_resolver_extra(self):
return {
"app": self,
"viewset": self,
}
def filter_kwargs(self, view_class, **kwargs):
return super().filter_kwargs(
view_class, **{"flow_classes": self.flow_classes, "viewset": self, **kwargs}
)
"""
Permissions
"""
def has_view_permission(self, user, obj=None):
return any(
flow_class.instance.has_view_permission(user, obj=obj)
for flow_class in self.flow_classes
)
"""
Inbox
"""
inbox_view_class = views.WorkflowInboxListView
def get_inbox_view_kwargs(self, **kwargs):
return self.filter_kwargs(self.inbox_view_class, **kwargs)
@viewprop
def inbox_view(self):
return self.inbox_view_class.as_view(**self.get_inbox_view_kwargs())
@property
def inbox_path(self):
return path("inbox/", self.inbox_view, name="inbox")
"""
Queue
"""
queue_view_class = views.WorkflowQueueListView
def get_queue_view_kwargs(self, **kwargs):
return self.filter_kwargs(self.queue_view_class, **kwargs)
@viewprop
def queue_view(self):
return self.queue_view_class.as_view(**self.get_queue_view_kwargs())
@property
def queue_path(self):
return path("queue/", self.queue_view, name="queue")
"""
Archive
"""
archive_view_class = views.WorkflowArchiveListView
def get_archive_view_kwargs(self, **kwargs):
return self.filter_kwargs(self.archive_view_class, **kwargs)
@viewprop
def archive_view(self):
return self.archive_view_class.as_view(**self.get_archive_view_kwargs())
@property
def archive_path(self):
return path("archive/", self.archive_view, name="archive")
def get_context_data(self, request):
inbox = Task.objects.inbox(self.flow_classes, request.user)
queue = Task.objects.queue(self.flow_classes, request.user)
return {
"user_inbox": inbox,
"user_queue": queue,
}
|
WorkflowAppViewset
|
python
|
joblib__joblib
|
joblib/_parallel_backends.py
|
{
"start": 9821,
"end": 11027
}
|
class ____(ParallelBackendBase):
"""A ParallelBackend which will execute all batches sequentially.
Does not use/create any threading objects, and hence has minimal
overhead. Used when n_jobs == 1.
"""
uses_threads = True
supports_timeout = False
supports_retrieve_callback = False
supports_sharedmem = True
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError("n_jobs == 0 in Parallel has no meaning")
return 1
def submit(self, func, callback=None):
"""Schedule a func to be run"""
raise RuntimeError("Should never be called for SequentialBackend.")
def retrieve_result_callback(self, out):
raise RuntimeError("Should never be called for SequentialBackend.")
def get_nested_backend(self):
# import is not top level to avoid cyclic import errors.
from .parallel import get_active_backend
# SequentialBackend should neither change the nesting level, the
# default backend or the number of jobs. Just return the current one.
return get_active_backend()
|
SequentialBackend
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 300624,
"end": 301115
}
|
class ____(sgqlc.types.Input):
"""Ordering options for connections to get sponsor entities for
GitHub Sponsors.
"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(SponsorOrderField), graphql_name="field")
"""The field to order sponsor entities by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
|
SponsorOrder
|
python
|
pyinstaller__pyinstaller
|
bootloader/waflib/Tools/vala.py
|
{
"start": 296,
"end": 10164
}
|
class ____(Task.Task):
vars = ["VALAC", "VALAC_VERSION", "VALAFLAGS"]
ext_out = ['.h']
def run(self):
cmd = self.env.VALAC + self.env.VALAFLAGS
resources = getattr(self, 'vala_exclude', [])
cmd.extend([a.abspath() for a in self.inputs if a not in resources])
ret = self.exec_command(cmd, cwd=self.vala_dir_node.abspath())
if ret:
return ret
if self.generator.dump_deps_node:
self.generator.dump_deps_node.write('\n'.join(self.generator.packages))
return ret
@taskgen_method
def init_vala_task(self):
self.profile = getattr(self, 'profile', 'gobject')
self.packages = packages = Utils.to_list(getattr(self, 'packages', []))
self.use = Utils.to_list(getattr(self, 'use', []))
if packages and not self.use:
self.use = packages[:]
if self.profile == 'gobject':
if not 'GOBJECT' in self.use:
self.use.append('GOBJECT')
def addflags(flags):
self.env.append_value('VALAFLAGS', flags)
if self.profile:
addflags('--profile=%s' % self.profile)
valatask = self.valatask
if hasattr(self, 'vala_dir'):
if isinstance(self.vala_dir, str):
valatask.vala_dir_node = self.path.get_bld().make_node(self.vala_dir)
try:
valatask.vala_dir_node.mkdir()
except OSError:
raise self.bld.fatal('Cannot create the vala dir %r' % valatask.vala_dir_node)
else:
valatask.vala_dir_node = self.vala_dir
else:
valatask.vala_dir_node = self.path.get_bld()
addflags('--directory=%s' % valatask.vala_dir_node.abspath())
if hasattr(self, 'thread'):
if self.profile == 'gobject':
if not 'GTHREAD' in self.use:
self.use.append('GTHREAD')
else:
Logs.warn('Profile %s means no threading support', self.profile)
self.thread = False
if self.thread:
addflags('--thread')
self.is_lib = 'cprogram' not in self.features
if self.is_lib:
addflags('--library=%s' % self.target)
h_node = valatask.vala_dir_node.find_or_declare('%s.h' % self.target)
valatask.outputs.append(h_node)
addflags('--header=%s' % h_node.name)
valatask.outputs.append(valatask.vala_dir_node.find_or_declare('%s.vapi' % self.target))
if getattr(self, 'gir', None):
gir_node = valatask.vala_dir_node.find_or_declare('%s.gir' % self.gir)
addflags('--gir=%s' % gir_node.name)
valatask.outputs.append(gir_node)
self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None))
if self.vala_target_glib:
addflags('--target-glib=%s' % self.vala_target_glib)
addflags(['--define=%s' % x for x in Utils.to_list(getattr(self, 'vala_defines', []))])
packages_private = Utils.to_list(getattr(self, 'packages_private', []))
addflags(['--pkg=%s' % x for x in packages_private])
def _get_api_version():
api_version = '1.0'
if hasattr(Context.g_module, 'API_VERSION'):
version = Context.g_module.API_VERSION.split(".")
if version[0] == "0":
api_version = "0." + version[1]
else:
api_version = version[0] + ".0"
return api_version
self.includes = Utils.to_list(getattr(self, 'includes', []))
valatask.install_path = getattr(self, 'install_path', '')
valatask.vapi_path = getattr(self, 'vapi_path', '${DATAROOTDIR}/vala/vapi')
valatask.pkg_name = getattr(self, 'pkg_name', self.env.PACKAGE)
valatask.header_path = getattr(self, 'header_path', '${INCLUDEDIR}/%s-%s' % (valatask.pkg_name, _get_api_version()))
valatask.install_binding = getattr(self, 'install_binding', True)
self.vapi_dirs = vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', []))
if hasattr(self, 'use'):
local_packages = Utils.to_list(self.use)[:]
seen = []
while len(local_packages) > 0:
package = local_packages.pop()
if package in seen:
continue
seen.append(package)
try:
package_obj = self.bld.get_tgen_by_name(package)
except Errors.WafError:
continue
package_obj.post()
package_name = package_obj.target
task = getattr(package_obj, 'valatask', None)
if task:
for output in task.outputs:
if output.name == package_name + ".vapi":
valatask.set_run_after(task)
if package_name not in packages:
packages.append(package_name)
if output.parent not in vapi_dirs:
vapi_dirs.append(output.parent)
if output.parent not in self.includes:
self.includes.append(output.parent)
if hasattr(package_obj, 'use'):
lst = self.to_list(package_obj.use)
lst.reverse()
local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages
addflags(['--pkg=%s' % p for p in packages])
for vapi_dir in vapi_dirs:
if isinstance(vapi_dir, Node.Node):
v_node = vapi_dir
else:
v_node = self.path.find_dir(vapi_dir)
if not v_node:
Logs.warn('Unable to locate Vala API directory: %r', vapi_dir)
else:
addflags('--vapidir=%s' % v_node.abspath())
self.dump_deps_node = None
if self.is_lib and self.packages:
self.dump_deps_node = valatask.vala_dir_node.find_or_declare('%s.deps' % self.target)
valatask.outputs.append(self.dump_deps_node)
if self.is_lib and valatask.install_binding:
headers_list = [o for o in valatask.outputs if o.suffix() == ".h"]
if headers_list:
self.install_vheader = self.add_install_files(install_to=valatask.header_path, install_from=headers_list)
vapi_list = [o for o in valatask.outputs if (o.suffix() in (".vapi", ".deps"))]
if vapi_list:
self.install_vapi = self.add_install_files(install_to=valatask.vapi_path, install_from=vapi_list)
gir_list = [o for o in valatask.outputs if o.suffix() == '.gir']
if gir_list:
self.install_gir = self.add_install_files(
install_to=getattr(self, 'gir_path', '${DATAROOTDIR}/gir-1.0'), install_from=gir_list
)
if hasattr(self, 'vala_resources'):
nodes = self.to_nodes(self.vala_resources)
valatask.vala_exclude = getattr(valatask, 'vala_exclude', []) + nodes
valatask.inputs.extend(nodes)
for x in nodes:
addflags(['--gresources', x.abspath()])
@extension('.vala', '.gs')
def vala_file(self, node):
try:
valatask = self.valatask
except AttributeError:
valatask = self.valatask = self.create_task('valac')
self.init_vala_task()
valatask.inputs.append(node)
name = node.name[:node.name.rfind('.')] + '.c'
c_node = valatask.vala_dir_node.find_or_declare(name)
valatask.outputs.append(c_node)
self.source.append(c_node)
@extension('.vapi')
def vapi_file(self, node):
try:
valatask = self.valatask
except AttributeError:
valatask = self.valatask = self.create_task('valac')
self.init_vala_task()
valatask.inputs.append(node)
@conf
def find_valac(self, valac_name, min_version):
valac = self.find_program(valac_name, var='VALAC')
try:
output = self.cmd_and_log(valac + ['--version'])
except Errors.WafError:
valac_version = None
else:
ver = re.search(r'\d+.\d+.\d+', output).group().split('.')
valac_version = tuple([int(x) for x in ver])
self.msg(
'Checking for %s version >= %r' % (valac_name, min_version), valac_version, valac_version
and valac_version >= min_version
)
if valac and valac_version < min_version:
self.fatal("%s version %r is too old, need >= %r" % (valac_name, valac_version, min_version))
self.env.VALAC_VERSION = valac_version
return valac
@conf
def check_vala(self, min_version=(0, 8, 0), branch=None):
if self.env.VALA_MINVER:
min_version = self.env.VALA_MINVER
if self.env.VALA_MINVER_BRANCH:
branch = self.env.VALA_MINVER_BRANCH
if not branch:
branch = min_version[:2]
try:
find_valac(self, 'valac-%d.%d' % (branch[0], branch[1]), min_version)
except self.errors.ConfigurationError:
find_valac(self, 'valac', min_version)
@conf
def check_vala_deps(self):
if not self.env.HAVE_GOBJECT:
pkg_args = {'package': 'gobject-2.0', 'uselib_store': 'GOBJECT', 'args': '--cflags --libs'}
if getattr(Options.options, 'vala_target_glib', None):
pkg_args['atleast_version'] = Options.options.vala_target_glib
self.check_cfg(**pkg_args)
if not self.env.HAVE_GTHREAD:
pkg_args = {'package': 'gthread-2.0', 'uselib_store': 'GTHREAD', 'args': '--cflags --libs'}
if getattr(Options.options, 'vala_target_glib', None):
pkg_args['atleast_version'] = Options.options.vala_target_glib
self.check_cfg(**pkg_args)
def configure(self):
self.load('gnu_dirs')
self.check_vala_deps()
self.check_vala()
self.add_os_flags('VALAFLAGS')
self.env.append_unique('VALAFLAGS', ['-C'])
def options(opt):
opt.load('gnu_dirs')
valaopts = opt.add_option_group('Vala Compiler Options')
valaopts.add_option(
'--vala-target-glib',
default=None,
dest='vala_target_glib',
metavar='MAJOR.MINOR',
help='Target version of glib for Vala GObject code generation'
)
|
valac
|
python
|
pytorch__pytorch
|
test/ao/sparsity/test_data_sparsifier.py
|
{
"start": 26407,
"end": 30520
}
|
class ____(TestCase):
def test_ptq_sparsify_first(self):
"""The expectation is post_training_sparse_quantize function
1. Takes in a model
2. Sparsifies the embeddings
3. Quantize the embeddings
This unit test checks that
1. Embeddings and EmbeddingBags are sparsified to the right sparsity levels
2. Embeddings and EmbeddingBags are quantized
3. Linear modules are not quantized
"""
model = Model()
sparse_config = {"sparsity_level": 0.80, "sparse_block_shape": (1, 1)}
select_embeddings = [model.embbag1, model.emb1]
post_training_sparse_quantize(
model,
data_sparsifier_class=DataNormSparsifier,
sparsify_first=True,
select_embeddings=select_embeddings,
**sparse_config,
)
assert type(model.emb1) is torch.ao.nn.quantized.modules.embedding_ops.Embedding
assert (
type(model.embbag1)
is torch.ao.nn.quantized.modules.embedding_ops.EmbeddingBag
)
assert type(model.emb_seq[0] is nn.Embedding)
assert type(model.emb_seq[1] is nn.EmbeddingBag)
assert type(model.linear1) is nn.Linear
assert type(model.linear2) is nn.Linear
dequant_emb1 = torch.dequantize(model.emb1.weight())
dequant_embbag1 = torch.dequantize(model.embbag1.weight())
threshold = 1e-2
sl_emb1 = (torch.abs(dequant_emb1) < threshold).float().mean()
sl_embbag1 = (torch.abs(dequant_embbag1) < threshold).float().mean()
assert abs(sl_emb1 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_embbag1 - 0.80) <= 0.05 # +- 5% leeway
def test_ptq_quantize_first(self):
"""The expectation is post_training_sparse_quantize function
1. Takes in a model
2. Quantize the embeddings
3. Sparsifies the embeddings
This unit test checks that
1. Embeddings and EmbeddingBags are sparsified to the right sparsity levels
2. Embeddings and EmbeddingBags are quantized
3. Linear modules are not quantized
"""
model = Model()
sparse_config = {"sparsity_level": 0.8, "sparse_block_shape": (1, 1)}
post_training_sparse_quantize(
model, DataNormSparsifier, sparsify_first=False, **sparse_config
)
assert type(model.emb1) is torch.ao.nn.quantized.modules.embedding_ops.Embedding
assert (
type(model.embbag1)
is torch.ao.nn.quantized.modules.embedding_ops.EmbeddingBag
)
assert (
type(model.emb_seq[0])
is torch.ao.nn.quantized.modules.embedding_ops.Embedding
)
assert (
type(model.emb_seq[1])
is torch.ao.nn.quantized.modules.embedding_ops.EmbeddingBag
)
assert type(model.linear1) is nn.Linear # not quantized
assert type(model.linear2) is nn.Linear # not quantized
dequant_emb1 = torch.dequantize(model.emb1.weight())
dequant_embbag1 = torch.dequantize(model.embbag1.weight())
dequant_emb_seq_0 = torch.dequantize(model.emb_seq[0].weight())
dequant_emb_seq_1 = torch.dequantize(model.emb_seq[1].weight())
# higher threshold as quantization occurs before sparsity
threshold = (
1 # zero points seem to have higher magnitude with sparsity occurring after
)
sl_emb1 = (torch.abs(dequant_emb1) < threshold).float().mean()
sl_embbag1 = (torch.abs(dequant_embbag1) < threshold).float().mean()
sl_emb_seq_0 = (torch.abs(dequant_emb_seq_0) < threshold).float().mean()
sl_emb_seq_1 = (torch.abs(dequant_emb_seq_1) < threshold).float().mean()
assert abs(sl_emb1 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_embbag1 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_emb_seq_0 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_emb_seq_1 - 0.80) <= 0.05 # +- 5% leeway
if __name__ == "__main__":
raise_on_run_directly("test/test_ao_sparsity.py")
|
TestQuantizationUtils
|
python
|
django__django
|
tests/urlpatterns_reverse/tests.py
|
{
"start": 65686,
"end": 68070
}
|
class ____(SimpleTestCase):
url_patterns = [
path("inner/", views.empty_view, name="urlobject-view"),
re_path(
r"^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$",
views.empty_view,
name="urlobject-view",
),
re_path(r"^inner/\+\\\$\*/$", views.empty_view, name="urlobject-special-view"),
]
app_urls = URLObject("inc-app")
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
"Specifying a namespace in include() without providing an "
"app_name is not supported."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, "namespace")
def test_include_4_tuple(self):
msg = "Passing a 4-tuple to include() is not supported."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, "app_name", "namespace", "blah"))
def test_include_3_tuple(self):
msg = "Passing a 3-tuple to include() is not supported."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, "app_name", "namespace"))
def test_include_3_tuple_namespace(self):
msg = (
"Cannot override the namespace for a dynamic module that provides a "
"namespace."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, "app_name", "namespace"), "namespace")
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, "app_name")),
(self.url_patterns, "app_name", "app_name"),
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, "app_name"), namespace="namespace"),
(self.url_patterns, "app_name", "namespace"),
)
def test_include_app_name(self):
self.assertEqual(include(self.app_urls), (self.app_urls, "inc-app", "inc-app"))
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, "namespace"), (self.app_urls, "inc-app", "namespace")
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
|
IncludeTests
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/models.py
|
{
"start": 3260,
"end": 5722
}
|
class ____(models.Model):
    """A third-party (social) account linked to a local user.

    Uniquely identified by the (provider, uid) pair; `extra_data` holds
    whatever profile payload the provider returned at login time.
    """

    # Owning local user; deleting the user cascades to their social accounts.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # Given a `SocialApp` from which this account originates, this field equals
    # the app's `app.provider_id` if available, `app.provider` otherwise.
    provider = models.CharField(
        verbose_name=_("provider"),
        max_length=200,
    )
    # Just in case you're wondering if an OpenID identity URL is going
    # to fit in a 'uid':
    #
    # Ideally, URLField(max_length=1024, unique=True) would be used
    # for identity. However, MySQL has a max_length limitation of 191
    # for URLField (in case of utf8mb4). How about
    # models.TextField(unique=True) then? Well, that won't work
    # either for MySQL due to another bug[1]. So the only way out
    # would be to drop the unique constraint, or switch to shorter
    # identity URLs. Opted for the latter, as [2] suggests that
    # identity URLs are supposed to be short anyway, at least for the
    # old spec.
    #
    # [1] https://code.djangoproject.com/ticket/2495.
    # [2] https://openid.net/specs/openid-authentication-1_1.html#limits
    uid = models.CharField(
        verbose_name=_("uid"), max_length=app_settings.UID_MAX_LENGTH
    )
    last_login = models.DateTimeField(verbose_name=_("last login"), auto_now=True)
    date_joined = models.DateTimeField(verbose_name=_("date joined"), auto_now_add=True)
    extra_data = models.JSONField(verbose_name=_("extra data"), default=dict)
    class Meta:
        # A user may have at most one account per (provider, uid) pair.
        unique_together = ("provider", "uid")
        verbose_name = _("social account")
        verbose_name_plural = _("social accounts")
    def authenticate(self):
        """Authenticate against the configured backends using this account."""
        return authenticate(account=self)
    def __str__(self):
        # Imported lazily to avoid a circular import at module load time.
        from .helpers import socialaccount_user_display
        return socialaccount_user_display(self)
    def get_profile_url(self):
        """Return the provider-side profile URL for this account."""
        return self.get_provider_account().get_profile_url()
    def get_avatar_url(self):
        """Return the provider-side avatar URL for this account."""
        return self.get_provider_account().get_avatar_url()
    def get_provider(self, request=None):
        """Return (and memoize on `_provider`) the provider for this account.

        Falls back to the ambient `context.request` when no request is given.
        """
        provider = getattr(self, "_provider", None)
        if provider:
            return provider
        adapter = get_adapter()
        provider = self._provider = adapter.get_provider(
            request or context.request, provider=self.provider
        )
        return provider
    def get_provider_account(self):
        """Wrap this account in the provider-specific account class."""
        return self.get_provider().wrap_account(self)
|
SocialAccount
|
python
|
realpython__materials
|
thread-safety-locks/bank_rlock.py
|
{
"start": 81,
"end": 1188
}
|
class ____:
def __init__(self):
self.balance = 0
self.lock = threading.RLock()
def deposit(self, amount):
print(
f"Thread {threading.current_thread().name} "
"waiting to acquire lock for .deposit()"
)
with self.lock:
print(
f"Thread {threading.current_thread().name} acquired lock for .deposit()"
)
time.sleep(0.1)
self._update_balance(amount)
def _update_balance(self, amount):
print(
f"Thread {threading.current_thread().name} "
"waiting to acquire lock for ._update_balance()"
)
with self.lock:
print(
f"Thread {threading.current_thread().name} "
"acquired lock for ._update_balance()"
)
self.balance += amount
# Demo: three worker threads deposit concurrently. The RLock inside the
# account makes the nested deposit() -> _update_balance() acquisition safe.
account = BankAccount()
with ThreadPoolExecutor(
    max_workers=3, thread_name_prefix="Worker"
) as executor:
    for _ in range(3):
        executor.submit(account.deposit, 100)
# Exiting the executor context waits for all submitted deposits to finish,
# so the balance read below is final.
print(f"Final balance: {account.balance}")
|
BankAccount
|
python
|
huggingface__transformers
|
src/transformers/models/glm4_moe/configuration_glm4_moe.py
|
{
"start": 1317,
"end": 10422
}
|
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4MoeModel`]. It is used to instantiate a
    Glm4Moe model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of [THUDM/GLM-4-100B-A10B](https://huggingface.co/THUDM/GLM-4-100B-A10B).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 151552):
            Vocabulary size of the Glm4Moe model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Glm4MoeModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 10944):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 46):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 96):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        moe_intermediate_size (`int`, *optional*, defaults to 1408):
            Intermediate size of the routed expert.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            number of experts per token.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts.
        n_routed_experts (`int`, *optional*, defaults to 128):
            Number of routed experts.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor or routed experts.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
        first_k_dense_replace (`int`, *optional*, defaults to 1):
            Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
                                                            \--k dense layers--/
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the topk probabilities.
        use_qk_norm (`bool`, *optional*, defaults to `False`):
            Whether to use query-key normalization in the attention
    ```python
    >>> from transformers import Glm4MoeModel, Glm4MoeConfig
    >>> # Initializing a Glm4Moe style configuration
    >>> configuration = Glm4MoeConfig()
    >>> # Initializing a model from the GLM-4-MOE-100B-A10B style configuration
    >>> model = Glm4MoeModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm4_moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `Glm4Moe`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
        "layers.*.mlp.experts.down_proj": "local_rowwise",
        "layers.*.mlp.experts": "gather",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    # Maps the generic `num_local_experts` attribute name onto this config's
    # `n_routed_experts` field.
    attribute_map = {
        "num_local_experts": "n_routed_experts",
    }

    def __init__(
        self,
        vocab_size: Optional[int] = 151552,
        hidden_size: Optional[int] = 4096,
        intermediate_size: Optional[int] = 10944,
        num_hidden_layers: Optional[int] = 46,
        num_attention_heads: Optional[int] = 96,
        num_key_value_heads: Optional[int] = 8,
        hidden_act: Optional[str] = "silu",
        max_position_embeddings: Optional[int] = 131072,
        initializer_range: Optional[float] = 0.02,
        # Annotation fixed: the default 1e-5 is a float, not an int.
        rms_norm_eps: Optional[float] = 1e-5,
        use_cache: Optional[bool] = True,
        tie_word_embeddings: Optional[bool] = False,
        rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
        attention_bias: Optional[bool] = False,
        attention_dropout: Optional[float] = 0.0,
        moe_intermediate_size: Optional[int] = 1408,
        num_experts_per_tok: Optional[int] = 8,
        n_shared_experts: Optional[int] = 1,
        n_routed_experts: Optional[int] = 128,
        routed_scaling_factor: Optional[float] = 1.0,
        n_group: Optional[int] = 1,
        topk_group: Optional[int] = 1,
        first_k_dense_replace: Optional[int] = 1,
        norm_topk_prob: Optional[bool] = True,
        use_qk_norm: Optional[bool] = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        kwargs.setdefault("partial_rotary_factor", 0.5)  # assign default for BC
        # MoE arguments
        self.moe_intermediate_size = moe_intermediate_size
        self.num_experts_per_tok = num_experts_per_tok
        self.n_group = n_group
        self.topk_group = topk_group
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.use_qk_norm = use_qk_norm
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
# Explicit public API of this configuration module.
__all__ = ["Glm4MoeConfig"]
|
Glm4MoeConfig
|
python
|
doocs__leetcode
|
lcci/17.10.Find Majority Element/Solution.py
|
{
"start": 0,
"end": 289
}
|
class ____:
def majorityElement(self, nums: List[int]) -> int:
cnt = m = 0
for v in nums:
if cnt == 0:
m, cnt = v, 1
else:
cnt += 1 if m == v else -1
return m if nums.count(m) > len(nums) // 2 else -1
|
Solution
|
python
|
google__pytype
|
pytype/output.py
|
{
"start": 908,
"end": 46459
}
|
class ____(utils.ContextWeakrefMixin):
"""Functions for converting abstract classes into PyTD."""
  class OutputMode(enum.IntEnum):
    """Controls the level of detail in pytd types. See set_output_mode."""
    NORMAL = 0  # Default; safe for pyi files.
    DETAILED = 1  # More detail (unsafe for pyi); used for error messages.
    LITERAL = 2  # Like DETAILED, plus Literal[...] for scalar constants.
  def __init__(self, ctx):
    """Initialize the converter in NORMAL output mode.

    Args:
      ctx: The abstract context (held weakly by ContextWeakrefMixin).
    """
    super().__init__(ctx)
    self._output_mode = Converter.OutputMode.NORMAL
    # When True, large literal containers skip per-binding filtering in
    # _get_values; toggled via the optimize_literals() context manager.
    self._optimize_literals = False
    # Type-parameter scopes consulted by _type_variable_to_pytd_type.
    self._scopes = []
@contextlib.contextmanager
def set_output_mode(self, mode):
"""Change the level of detail in pytd types.
Args:
mode: Converter.OutputMode option controlling the level of detail to use.
NORMAL - the default, safe for pyi files.
DETAILED - more detail, unsafe for pyi files. The converter will do
things like using the names of inner classes rather than Any and
including the known argument types for a callable even if the argument
count is unknown. Useful for error messages.
LITERAL - like DETAILED, but bool, int, str, and bytes constants will be
emitted as Literal[<constant>] rather than their type.
Yields:
None.
"""
old = self._output_mode
self._output_mode = mode
yield
self._output_mode = old
@contextlib.contextmanager
def optimize_literals(self, val=True):
"""Optimize output of literal data structures in pyi files."""
old = self._optimize_literals
self._optimize_literals = val
yield
self._optimize_literals = old
  @property
  def _detailed(self):
    # True in both DETAILED and LITERAL modes (IntEnum ordering).
    return self._output_mode >= Converter.OutputMode.DETAILED
  def _get_values(self, node, var, view):
    """Return the data for `var`'s bindings, filtered by view or node.

    Args:
      node: CFG node used for visibility filtering, or None.
      var: The variable whose binding data to collect.
      view: A Variable -> binding map, or None.

    Returns:
      A list of binding data.
    """
    if var.bindings and view is not None:
      # A view pins var to exactly one binding.
      return [view[var].data]
    elif self._optimize_literals and len(var.bindings) > LARGE_LITERAL_SIZE:
      # Performance optimisation: If we have so many elements in a container, it
      # is very likely a literal, and would ideally be constructed at a single
      # CFG node. Therefore, we do not attempt to filter its bindings (which is
      # a very expensive operation for large collections).
      return var.data
    elif node:
      return var.FilteredData(node, strict=False)
    else:
      return var.data
def _is_tuple(self, v, instance):
return isinstance(v, abstract.TupleClass) or isinstance(
instance, abstract.Tuple
)
  def _make_decorator(self, name, alias):
    """Return a pytd.Alias from `alias` to a minimal function named `name`."""
    # If decorators are output as aliases to NamedTypes, they will be converted
    # to Functions and fail a verification step if those functions have type
    # parameters. Since we just want the function name, and since we have a
    # fully resolved name at this stage, we just output a minimal pytd.Function
    sig = pytd.Signature((), None, None, pytd.AnythingType(), (), ())
    fn = pytd.Function(
        name, (sig,), pytd.MethodKind.METHOD, pytd.MethodFlag.NONE
    )
    return pytd.Alias(alias, fn)
def _make_decorators(self, decorators):
return [
self._make_decorator(d, d)
for d in decorators
if class_mixin.get_metadata_key(d)
]
  def _value_to_parameter_types(self, node, v, instance, template, seen, view):
    """Get PyTD types for the parameters of an instance of an abstract value.

    Args:
      node: The CFG node.
      v: The abstract class whose type parameters are being converted.
      instance: An instance of v (may be None).
      template: The sequence of type-parameter names to fill in.
      seen: Set of already-seen instances, for cycle detection.
      view: A Variable -> binding map (may be None).

    Returns:
      A list of pytd types, one per template entry.
    """
    if isinstance(v, abstract.CallableClass):
      assert template == (abstract_utils.ARGS, abstract_utils.RET), template
      should_preserve_paramspec = (
          v.has_paramspec()
          and v.num_args == 1
          and instance
          and isinstance(
              instance.get_formal_type_parameter(abstract_utils.ARGS),
              abstract.Unsolvable,
          )
      )
      if not should_preserve_paramspec:
        # Expand ARGS into one template slot per positional argument.
        template = list(range(v.num_args)) + [template[1]]
    if self._is_tuple(v, instance):
      # Heterogeneous tuples get one template slot per element.
      if isinstance(v, abstract.TupleClass):
        new_template = range(v.tuple_length)
      else:
        new_template = range(instance.tuple_length)
      if template:
        assert len(template) == 1 and template[0] == abstract_utils.T, template
      else:
        # We have a recursive type. By erasing the instance and value
        # information, we'll return Any for all of the tuple elements.
        v = instance = None
      template = new_template
    if instance is None and isinstance(v, abstract.ParameterizedClass):
      assert v
      # No instance: fall back to the class's formal type parameters.
      return [
          self.value_instance_to_pytd_type(
              node, v.get_formal_type_parameter(t), None, seen, view
          )
          for t in template
      ]
    elif isinstance(instance, abstract.SimpleValue):
      assert instance
      type_arguments = []
      for t in template:
        # Collect candidate parameter values (with their views) for slot t.
        if isinstance(instance, abstract.Tuple):
          elem_var = instance.pyval[t]
          if abstract_utils.is_var_splat(elem_var):
            elem_var = abstract_utils.unwrap_splat(elem_var)
          param_values = {
              val: view
              for val in self._get_values(node, elem_var, view)
          }
        elif instance.has_instance_type_parameter(t):
          param_values = {
              val: view
              for val in self._get_values(
                  node, instance.get_instance_type_parameter(t), view
              )
          }
        elif isinstance(v, abstract.CallableClass):
          param_node = node or self.ctx.root_node
          param_var = v.get_formal_type_parameter(t).instantiate(param_node)
          if view is None:
            param_values = {val: None for val in param_var.data}
          else:
            param_values = {}
            for new_view in abstract_utils.get_views([param_var], param_node):
              new_view.update(view)
              param_values[new_view[param_var].data] = new_view
        else:
          param_values = {self.ctx.convert.unsolvable: view}
        formal_param = v.get_formal_type_parameter(t)
        if (
            t == abstract_utils.ARGS
            and isinstance(v, abstract.CallableClass)
            and v.has_paramspec()
            and [*param_values] == [self.ctx.convert.unsolvable]
        ):
          # This is a Callable[P, T] where P is unsolvable.
          arg = pytd.AnythingType()
        # If the instance's parameter value is unsolvable or the parameter type
        # is recursive, we can get a more precise type from the class. Note that
        # we need to be careful not to introduce unbound type parameters.
        elif (
            isinstance(v, abstract.ParameterizedClass)
            and not formal_param.formal
            and (
                list(param_values.keys()) == [self.ctx.convert.unsolvable]
                or abstract_utils.is_recursive_annotation(formal_param)
            )
        ):
          arg = self.value_instance_to_pytd_type(
              node, formal_param, None, seen, view
          )
        else:
          # General case: join the pytd types of all candidate values.
          arg = pytd_utils.JoinTypes(
              self.value_to_pytd_type(node, p, seen, param_view)
              for p, param_view in param_values.items()
          )
        type_arguments.append(arg)
      return type_arguments
    else:
      # Neither a usable class nor instance: every slot becomes Any.
      return [pytd.AnythingType() for _ in template]
  def value_instance_to_pytd_type(self, node, v, instance, seen, view):
    """Get the PyTD type an instance of this object would have.

    Args:
      node: The node.
      v: The object.
      instance: The instance.
      seen: Already seen instances.
      view: A Variable -> binding map.

    Returns:
      A PyTD type.
    """
    if abstract_utils.is_recursive_annotation(v):
      # Emit a LateType to break the recursion in the printed type.
      return pytd.LateType(v.unflatten_expr() if self._detailed else v.expr)
    elif isinstance(v, abstract.Union):
      return pytd.UnionType(
          tuple(
              self.value_instance_to_pytd_type(node, t, instance, seen, view)
              for t in v.options
          )
      )
    elif isinstance(v, abstract.AnnotationContainer):
      return self.value_instance_to_pytd_type(
          node, v.base_cls, instance, seen, view
      )
    elif isinstance(v, abstract.LiteralClass):
      if isinstance(v.value, abstract.Instance) and v.value.cls.is_enum:
        typ = pytd_utils.NamedTypeWithModule(
            v.value.cls.official_name or v.value.cls.name, v.value.cls.module
        )
        value = pytd.Constant(v.value.name, typ)
      elif isinstance(v.value.pyval, (str, bytes)):
        # Strings are stored as strings of their representations, prefix and
        # quotes and all.
        value = repr(v.value.pyval)
      elif isinstance(v.value.pyval, bool):
        # True and False are stored as pytd constants.
        value = self.ctx.loader.lookup_pytd("builtins", v.value.pyval)
      else:
        # Ints are stored as their literal values. Note that Literal[None] or a
        # nested literal will never appear here, since we simplified it to None
        # or unnested it, respectively, in typing_overlay.
        assert isinstance(v.value.pyval, int), v.value.pyval
        value = v.value.pyval
      return pytd.Literal(value)
    elif isinstance(v, typed_dict.TypedDictClass):
      # TypedDict inherits from abstract.Dict for analysis purposes, but when
      # outputting to a pyi we do not want to treat it as a generic type.
      return pytd.NamedType(v.name)
    elif isinstance(v, fiddle_overlay.BuildableType):
      # TODO(mdemello): This should Just Work via the base PyTDClass!
      param = self.value_instance_to_pytd_type(
          node, v.underlying, None, seen, view
      )
      return pytd.GenericType(
          base_type=pytd.NamedType(f"fiddle.{v.fiddle_type_name}"),
          parameters=(param,),
      )
    elif isinstance(v, abstract.Class):
      if not self._detailed and v.official_name is None:
        return pytd.AnythingType()
      if seen is None:
        # We make the set immutable to ensure that the seen instances for
        # different parameter values don't interfere with one another.
        seen = frozenset()
      if instance in seen:
        # We have a circular dependency in our types (e.g., lst[0] == lst). Stop
        # descending into the type parameters.
        type_params = ()
      else:
        type_params = tuple(t.name for t in v.template)
      if instance is not None:
        seen |= {instance}
      type_arguments = self._value_to_parameter_types(
          node, v, instance, type_params, seen, view
      )
      base = pytd_utils.NamedTypeWithModule(v.official_name or v.name, v.module)
      if self._is_tuple(v, instance):
        homogeneous = False
      elif v.full_name == "typing.Callable":
        is_callable_with_unsolvable_paramspec = (
            isinstance(v, abstract.CallableClass)
            and v.has_paramspec()
            and len(type_arguments) == 2
            and isinstance(type_arguments[0], pytd.AnythingType)
        )
        homogeneous = (
            not isinstance(v, abstract.CallableClass)
            or is_callable_with_unsolvable_paramspec
        )
      else:
        homogeneous = len(type_arguments) == 1
      return pytd_utils.MakeClassOrContainerType(
          base, type_arguments, homogeneous
      )
    elif isinstance(v, abstract.TYPE_VARIABLE_TYPES):
      # We generate the full definition because, if this type parameter is
      # imported, we will need the definition in order to declare it later.
      return self._type_variable_to_def(node, v, v.name)
    elif isinstance(v, typing_overlay.Never):
      return pytd.NothingType()
    elif isinstance(v, abstract.Concatenate):
      params = tuple(
          self.value_instance_to_pytd_type(node, t, instance, seen, view)
          for t in v.args + [v.paramspec]
      )
      return pytd.Concatenate(
          pytd.NamedType("typing.Concatenate"), parameters=params
      )
    else:
      # Fallback: anything we don't know how to convert becomes Any.
      log.info("Using Any for instance of %s", v.name)
      return pytd.AnythingType()
  def _type_variable_to_pytd_type(self, node, v, seen, view):
    """Convert a type-variable instance `v` into a pytd type.

    In-scope (or dummy-container) variables keep their TypeVar definition;
    otherwise the variable is replaced by its bindings, constraints, bound,
    or Any, in that order of preference.
    """
    if v.scope in self._scopes or isinstance(
        v.instance, abstract_utils.DummyContainer
    ):
      if isinstance(v, abstract.TYPE_VARIABLE_INSTANCES):
        return self._type_variable_to_def(node, v.param, v.param.name)
      else:
        assert False, f"Unexpected type variable type: {type(v)}"
    elif v.instance.get_instance_type_parameter(v.full_name).bindings:
      # The type parameter was initialized. Set the view to None, since we
      # don't include v.instance in the view.
      return pytd_utils.JoinTypes(
          self.value_to_pytd_type(node, p, seen, None)
          for p in v.instance.get_instance_type_parameter(v.full_name).data
      )
    elif v.param.constraints:
      return pytd_utils.JoinTypes(
          self.value_instance_to_pytd_type(node, p, None, seen, view)
          for p in v.param.constraints
      )
    elif v.param.bound:
      return self.value_instance_to_pytd_type(
          node, v.param.bound, None, seen, view
      )
    else:
      return pytd.AnythingType()
  def value_to_pytd_type(self, node, v, seen, view):
    """Get a PyTD type representing this object, as seen at a node.

    Args:
      node: The node from which we want to observe this object.
      v: The object.
      seen: The set of values seen before while computing the type.
      view: A Variable -> binding map.

    Returns:
      A PyTD type.

    Raises:
      NotImplementedError: If v's class has no conversion rule here.
    """
    if isinstance(v, (abstract.Empty, typing_overlay.Never)):
      return pytd.NothingType()
    elif isinstance(v, abstract.TYPE_VARIABLE_INSTANCES):
      return self._type_variable_to_pytd_type(node, v, seen, view)
    elif isinstance(v, (typing_overlay.TypeVar, typing_overlay.ParamSpec)):
      # The TypeVar/ParamSpec factory itself, not an instance of one.
      return pytd.NamedType("builtins.type")
    elif isinstance(v, dataclass_overlay.FieldInstance):
      if not v.default:
        return pytd.AnythingType()
      return pytd_utils.JoinTypes(
          self.value_to_pytd_type(node, d, seen, view) for d in v.default.data
      )
    elif isinstance(v, attr_overlay.AttribInstance):
      ret = self.value_to_pytd_type(node, v.typ, seen, view)
      md = metadata.to_pytd(v.to_metadata())
      return pytd.Annotated(ret, ("'pytype_metadata'", md))
    elif isinstance(v, special_builtins.PropertyInstance):
      return pytd.NamedType("builtins.property")
    elif isinstance(v, typed_dict.TypedDict):
      return pytd.NamedType(v.props.name)
    elif isinstance(v, abstract.FUNCTION_TYPES):
      try:
        signatures = function.get_signatures(v)
      except NotImplementedError:
        return pytd.NamedType("typing.Callable")
      if len(signatures) == 1:
        val = self.signature_to_callable(signatures[0])
        if not isinstance(v, abstract.PYTD_FUNCTION_TYPES) or not val.formal:
          # This is a workaround to make sure we don't put unexpected type
          # parameters in call traces.
          return self.value_instance_to_pytd_type(node, val, None, seen, view)
      # Overloaded functions collapse to a bare Callable.
      return pytd.NamedType("typing.Callable")
    elif isinstance(v, (abstract.ClassMethod, abstract.StaticMethod)):
      return self.value_to_pytd_type(node, v.method, seen, view)
    elif isinstance(
        v, (special_builtins.IsInstance, special_builtins.ClassMethodCallable)
    ):
      return pytd.NamedType("typing.Callable")
    elif isinstance(v, abstract.Class):
      # A class object is typed as type[C].
      param = self.value_instance_to_pytd_type(node, v, None, seen, view)
      return pytd.GenericType(
          base_type=pytd.NamedType("builtins.type"), parameters=(param,)
      )
    elif isinstance(v, abstract.Module):
      return pytd.Alias(v.name, pytd.Module(v.name, module_name=v.full_name))
    elif (
        self._output_mode >= Converter.OutputMode.LITERAL
        and isinstance(v, abstract.ConcreteValue)
        and isinstance(v.pyval, (int, str, bytes))
    ):
      # LITERAL mode is used only for pretty-printing, so we just stringify the
      # inner value rather than properly converting it.
      return pytd.Literal(repr(v.pyval))
    elif isinstance(v, abstract.SimpleValue):
      ret = self.value_instance_to_pytd_type(
          node, v.cls, v, seen=seen, view=view
      )
      ret.Visit(
          visitors.FillInLocalPointers({"builtins": self.ctx.loader.builtins})
      )
      return ret
    elif isinstance(v, abstract.Union):
      return pytd_utils.JoinTypes(
          self.value_to_pytd_type(node, o, seen, view) for o in v.options
      )
    elif isinstance(v, special_builtins.SuperInstance):
      return pytd.NamedType("builtins.super")
    elif isinstance(v, abstract.TypeParameter):
      # Arguably, the type of a type parameter is NamedType("typing.TypeVar"),
      # but pytype doesn't know how to handle that, so let's just go with Any
      # unless self._detailed is set.
      if self._detailed:
        return pytd.NamedType("typing.TypeVar")
      else:
        return pytd.AnythingType()
    elif isinstance(v, abstract.ParamSpec):
      # Follow the same logic as `TypeVar`s
      if self._detailed:
        return pytd.NamedType("typing.ParamSpec")
      else:
        return pytd.AnythingType()
    elif isinstance(v, abstract.Unsolvable):
      return pytd.AnythingType()
    elif isinstance(v, abstract.Unknown):
      return pytd.NamedType(v.class_name)
    elif isinstance(v, abstract.BuildClass):
      return pytd.NamedType("typing.Callable")
    elif isinstance(v, abstract.FinalAnnotation):
      param = self.value_to_pytd_type(node, v.annotation, seen, view)
      return pytd.GenericType(
          base_type=pytd.NamedType("typing.Final"), parameters=(param,)
      )
    elif isinstance(v, abstract.SequenceLength):
      # For debugging purposes, while developing the feature.
      return pytd.Annotated(
          base_type=pytd.NamedType("SequenceLength"),
          annotations=(str(v.length), str(v.splat)),
      )
    elif isinstance(v, abstract.Concatenate):
      # For debugging purposes, while developing the feature.
      return pytd.NamedType("typing.Concatenate")
    elif isinstance(v, function.ParamSpecMatch):
      return pytd.AnythingType()
    elif isinstance(v, abstract.ParamSpecArgs):
      return pytd.AnythingType()
    else:
      raise NotImplementedError(v.__class__.__name__)
  def signature_to_callable(self, sig):
    """Converts a function.Signature object into a callable object.

    Args:
      sig: The signature to convert.

    Returns:
      An abstract.CallableClass representing the signature, or an
      abstract.ParameterizedClass if the signature has a variable number of
      arguments.
    """
    base_cls = self.ctx.convert.function_type
    # Unannotated return defaults to Any (unsolvable).
    ret = sig.annotations.get("return", self.ctx.convert.unsolvable)
    if not sig.kwonly_params and (
        self._detailed
        or (sig.mandatory_param_count() == sig.maximum_param_count())
    ):
      # If self._detailed is false, we throw away the argument types if the
      # function takes a variable number of arguments, which is correct for pyi
      # generation but undesirable for, say, error message printing.
      args = [
          sig.annotations.get(name, self.ctx.convert.unsolvable)
          for name in sig.param_names
      ]
      params = {
          abstract_utils.ARGS: self.ctx.convert.merge_values(args),
          abstract_utils.RET: ret,
      }
      # Also record each positional argument type under its index.
      params.update(enumerate(args))
      return abstract.CallableClass(base_cls, params, self.ctx)
    else:
      # The only way to indicate kwonly arguments or a variable number of
      # arguments in a Callable is to not specify argument types at all.
      params = {
          abstract_utils.ARGS: self.ctx.convert.unsolvable,
          abstract_utils.RET: ret,
      }
      return abstract.ParameterizedClass(base_cls, params, self.ctx)
  def value_to_pytd_def(self, node, v, name):
    """Get a PyTD definition for this object.

    Args:
      node: The node.
      v: The object.
      name: The object name.

    Returns:
      A PyTD definition.

    Raises:
      NotImplementedError: If v's class has no conversion rule here.
    """
    if isinstance(v, abstract.Module):
      return pytd.Alias(name, pytd.Module(name, module_name=v.full_name))
    elif isinstance(v, abstract.BoundFunction):
      d = self.value_to_pytd_def(node, v.underlying, name)
      assert isinstance(d, pytd.Function)
      # Drop the bound first parameter (e.g. `self`) from every signature.
      sigs = tuple(sig.Replace(params=sig.params[1:]) for sig in d.signatures)
      return d.Replace(signatures=sigs)
    elif isinstance(v, attr_overlay.AttrsBase):
      ret = pytd.NamedType("typing.Callable")
      md = metadata.to_pytd(v.to_metadata())
      return pytd.Annotated(ret, ("'pytype_metadata'", md))
    elif isinstance(v, abstract.PyTDFunction) and not isinstance(
        v, typing_overlay.TypeVar
    ):
      return pytd.Function(
          name=name,
          signatures=tuple(sig.pytd_sig for sig in v.signatures),
          kind=v.kind,
          flags=pytd.MethodFlag.abstract_flag(v.is_abstract),
      )
    elif isinstance(v, abstract.InterpreterFunction):
      return self._function_to_def(node, v, name)
    elif isinstance(v, abstract.SimpleFunction):
      return self._simple_func_to_def(node, v, name)
    elif isinstance(v, (abstract.ParameterizedClass, abstract.Union)):
      return pytd.Alias(name, v.to_pytd_type_of_instance(node))
    elif isinstance(v, abstract.PyTDClass) and v.module:
      # This happens if a module does e.g. "from x import y as z", i.e., copies
      # something from another module to the local namespace. We *could*
      # reproduce the entire class, but we choose a more dense representation.
      return v.to_pytd_type(node)
    elif isinstance(v, typed_dict.TypedDictClass):
      return self._typed_dict_to_def(node, v, name)
    elif isinstance(v, abstract.PyTDClass):  # a namedtuple instance
      assert name != v.name
      return pytd.Alias(name, pytd.NamedType(v.name))
    elif isinstance(v, abstract.InterpreterClass):
      if (
          v.official_name is None
          or name == v.official_name
          or v.official_name.endswith(f".{name}")
      ) and not v.module:
        return self._class_to_def(node, v, name)
      else:
        # Represent a class alias as X: Type[Y] rather than X = Y so the pytd
        # printer can distinguish it from a module alias.
        type_name = v.full_name if v.module else v.official_name
        return pytd.Constant(
            name,
            pytd.GenericType(
                pytd.NamedType("builtins.type"), (pytd.NamedType(type_name),)
            ),
        )
    elif isinstance(v, abstract.TYPE_VARIABLE_TYPES):
      return self._type_variable_to_def(node, v, name)
    elif isinstance(v, abstract.Unsolvable):
      return pytd.Constant(name, v.to_pytd_type(node))
    else:
      raise NotImplementedError(v.__class__.__name__)
  def _ordered_attrs_to_instance_types(self, node, attr_metadata, annots):
    """Get instance types for ordered attrs in the metadata.

    Args:
      node: The CFG node.
      attr_metadata: Class metadata dict; "attr_order" lists the attributes.
      annots: The annotated locals for the class (may be None).

    Yields:
      (attribute name, pytd instance type or None) pairs, in attr_order.
    """
    attrs = attr_metadata.get("attr_order", [])
    if not annots or not attrs:
      return
    # Use the ordering from attr_order, but use the types in the annotations
    # dict, to handle InitVars correctly (an InitVar without a default will be
    # in attr_order, but not in annotations, and an InitVar with a default will
    # have its type in attr_order set to the inner type).
    annotations = dict(annots.get_annotations(node))
    for a in attrs:
      if a.name in annotations:
        typ = annotations[a.name]
      elif a.kind == class_mixin.AttributeKinds.INITVAR:
        # Do not output initvars without defaults
        typ = None
      else:
        typ = a.typ
      typ = typ and typ.to_pytd_type_of_instance(node)
      yield a.name, typ
def annotations_to_instance_types(self, node, annots):
"""Get instance types for annotations not present in the members map."""
if annots:
for name, local in annots.annotated_locals.items():
typ = local.get_type(node, name)
if typ:
t = typ.to_pytd_type_of_instance(node)
if local.final:
t = pytd.GenericType(pytd.NamedType("typing.Final"), (t,))
yield name, t
  def _function_call_to_return_type(self, node, v, seen_return, num_returns):
    """Get a function call's pytd return type.

    Prefers the declared return annotation; otherwise converts the observed
    return value. A lone NothingType return from an Empty value is widened
    to Any, since Empty means "no information", not "never returns".
    """
    if v.signature.has_return_annotation:
      ret = v.signature.annotations["return"].to_pytd_type_of_instance(node)
    else:
      ret = seen_return.data.to_pytd_type(node)
      if isinstance(ret, pytd.NothingType) and num_returns == 1:
        if isinstance(seen_return.data, abstract.Empty):
          ret = pytd.AnythingType()
        else:
          # Only a genuine Never may keep the NothingType return.
          assert isinstance(seen_return.data, typing_overlay.Never)
    return ret
  def _function_call_combination_to_signature(
      self, func, call_combination, num_combinations
  ):
    """Convert one observed call combination of `func` into a pytd.Signature.

    Args:
      func: The function whose call is being converted.
      call_combination: A (node_after, combination, return_value) triple.
      num_combinations: Total number of combinations for this function.

    Returns:
      A pytd.Signature for this combination.
    """
    node_after, combination, return_value = call_combination
    params = []
    for i, (name, kind, optional) in enumerate(func.get_parameters()):
      if i < func.nonstararg_count and name in func.signature.annotations:
        # Declared annotations take precedence over observed values.
        t = func.signature.annotations[name].to_pytd_type_of_instance(
            node_after
        )
      else:
        t = combination[name].data.to_pytd_type(node_after)
      # Python uses ".0" etc. for the names of parameters that are tuples,
      # like e.g. in: "def f((x, y), z)".
      params.append(
          pytd.Parameter(name.replace(".", "_"), t, kind, optional, None)
      )
    ret = self._function_call_to_return_type(
        node_after, func, return_value, num_combinations
    )
    if func.has_varargs():
      if func.signature.varargs_name in func.signature.annotations:
        annot = func.signature.annotations[func.signature.varargs_name]
        typ = annot.to_pytd_type_of_instance(node_after)
      else:
        typ = pytd.NamedType("builtins.tuple")
      starargs = pytd.Parameter(
          func.signature.varargs_name,
          typ,
          pytd.ParameterKind.REGULAR,
          True,
          None,
      )
    else:
      starargs = None
    if func.has_kwargs():
      if func.signature.kwargs_name in func.signature.annotations:
        annot = func.signature.annotations[func.signature.kwargs_name]
        typ = annot.to_pytd_type_of_instance(node_after)
      else:
        typ = pytd.NamedType("builtins.dict")
      starstarargs = pytd.Parameter(
          func.signature.kwargs_name,
          typ,
          pytd.ParameterKind.REGULAR,
          True,
          None,
      )
    else:
      starstarargs = None
    return pytd.Signature(
        params=tuple(params),
        starargs=starargs,
        starstarargs=starstarargs,
        return_type=ret,
        exceptions=(),  # TODO(b/159052087): record exceptions
        template=(),
    )
def _function_to_def(self, node, v, function_name):
"""Convert an InterpreterFunction to a PyTD definition."""
signatures = []
for func in v.signature_functions():
combinations = func.get_call_combinations(node)
num_combinations = len(combinations)
signatures.extend(
self._function_call_combination_to_signature(
func, combination, num_combinations
)
for combination in combinations
)
decorators = tuple(self._make_decorators(v.decorators))
return pytd.Function(
name=function_name,
signatures=tuple(signatures),
kind=pytd.MethodKind.METHOD,
flags=pytd.MethodFlag.abstract_flag(v.is_abstract),
decorators=decorators,
)
def _simple_func_to_def(self, node, v, name):
"""Convert a SimpleFunction to a PyTD definition."""
sig = v.signature
def get_parameter(p, kind):
if p in sig.annotations:
param_type = sig.annotations[p].to_pytd_type_of_instance(node)
else:
param_type = pytd.AnythingType()
return pytd.Parameter(p, param_type, kind, p in sig.defaults, None)
posonly = [
get_parameter(p, pytd.ParameterKind.POSONLY) for p in sig.posonly_params
]
params = [
get_parameter(p, pytd.ParameterKind.REGULAR)
for p in sig.param_names[sig.posonly_count :]
]
kwonly = [
get_parameter(p, pytd.ParameterKind.KWONLY) for p in sig.kwonly_params
]
if sig.varargs_name:
star = pytd.Parameter(
sig.varargs_name,
sig.annotations[sig.varargs_name].to_pytd_type_of_instance(node),
pytd.ParameterKind.REGULAR,
False,
None,
)
else:
star = None
if sig.kwargs_name:
starstar = pytd.Parameter(
sig.kwargs_name,
sig.annotations[sig.kwargs_name].to_pytd_type_of_instance(node),
pytd.ParameterKind.REGULAR,
False,
None,
)
else:
starstar = None
if sig.has_return_annotation:
ret_type = sig.annotations["return"].to_pytd_type_of_instance(node)
else:
ret_type = pytd.NamedType("builtins.NoneType")
pytd_sig = pytd.Signature(
params=tuple(posonly + params + kwonly),
starargs=star,
starstarargs=starstar,
return_type=ret_type,
exceptions=(),
template=(),
)
return pytd.Function(name, (pytd_sig,), pytd.MethodKind.METHOD)
def _function_to_return_types(self, node, fvar, allowed_type_params=()):
"""Convert a function variable to a list of PyTD return types."""
options = fvar.FilteredData(self.ctx.exitpoint, strict=False)
if not all(isinstance(o, abstract.Function) for o in options):
return [pytd.AnythingType()]
types = []
for val in options:
if isinstance(val, abstract.InterpreterFunction):
combinations = val.get_call_combinations(node)
for node_after, _, return_value in combinations:
types.append(
self._function_call_to_return_type(
node_after, val, return_value, len(combinations)
)
)
elif isinstance(val, abstract.PyTDFunction):
types.extend(sig.pytd_sig.return_type for sig in val.signatures)
else:
types.append(pytd.AnythingType())
safe_types = [] # types with illegal type parameters removed
for t in types:
params = pytd_utils.GetTypeParameters(t)
t = t.Visit(
visitors.ReplaceTypeParameters({
p: p if p.name in allowed_type_params else p.upper_value
for p in params
})
)
safe_types.append(t)
return safe_types
def _is_instance(self, value, cls_name):
return (
isinstance(value, abstract.Instance) and value.cls.full_name == cls_name
)
def _class_to_def(self, node, v, class_name):
"""Convert an InterpreterClass to a PyTD definition."""
self._scopes.append(class_name)
methods = {}
constants = collections.defaultdict(pytd_utils.TypeBuilder)
annots = abstract_utils.get_annotations_dict(v.members)
annotated_names = set()
def add_constants(iterator):
for name, t in iterator:
if t is None:
# Remove the entry from constants
annotated_names.add(name)
elif name not in annotated_names:
constants[name].add_type(t)
annotated_names.add(name)
add_constants(
self._ordered_attrs_to_instance_types(node, v.metadata, annots)
)
add_constants(self.annotations_to_instance_types(node, annots))
def add_final(defn, value):
if value.final:
return defn.Replace(flags=defn.flags | pytd.MethodFlag.FINAL)
else:
return defn
def get_decorated_method(name, value, func_slot):
fvar = getattr(value, func_slot)
func = abstract_utils.get_atomic_value(fvar, abstract.Function)
defn = self.value_to_pytd_def(node, func, name)
defn = defn.Visit(visitors.DropMutableParameters())
defn = add_final(defn, value)
return defn
def add_decorated_method(name, value, kind):
try:
defn = get_decorated_method(name, value, "func")
except (AttributeError, abstract_utils.ConversionError):
constants[name].add_type(pytd.AnythingType())
return
defn = defn.Replace(kind=kind)
methods[name] = defn
decorators = self._make_decorators(v.decorators)
if v.final:
decorators.append(self._make_decorator("typing.final", "final"))
# Collect nested classes
classes = [
self.value_to_pytd_def(node, x, x.name) for x in v.get_inner_classes()
]
inner_class_names = {x.name for x in classes}
class_type_params = {t.name for t in v.template}
# class-level attributes
for name, member in v.members.items():
if (
name in abstract_utils.CLASS_LEVEL_IGNORE
or name in annotated_names
or (v.is_enum and name in ("__new__", "__eq__"))
or name in inner_class_names
):
continue
for value in member.FilteredData(self.ctx.exitpoint, strict=False):
if isinstance(value, special_builtins.PropertyInstance):
# For simplicity, output properties as constants, since our parser
# turns them into constants anyway.
if value.fget:
for typ in self._function_to_return_types(
node, value.fget, allowed_type_params=class_type_params
):
constants[name].add_type(pytd.Annotated(typ, ("'property'",)))
else:
constants[name].add_type(
pytd.Annotated(pytd.AnythingType(), ("'property'",))
)
elif isinstance(value, special_builtins.StaticMethodInstance):
add_decorated_method(name, value, pytd.MethodKind.STATICMETHOD)
elif isinstance(value, special_builtins.ClassMethodInstance):
add_decorated_method(name, value, pytd.MethodKind.CLASSMETHOD)
elif isinstance(value, abstract.Function):
# value_to_pytd_def returns different pytd node types depending on the
# input type, which pytype struggles to reason about.
method = cast(
pytd.Function, self.value_to_pytd_def(node, value, name)
)
def fix(sig):
if not sig.params:
return sig
# Check whether the signature's 'self' type is the current class.
self_type = sig.params[0].type
maybe_params = pytd_utils.UnpackGeneric(self_type, "builtins.type")
if maybe_params:
self_type_name = maybe_params[0].name
else:
self_type_name = self_type.name
if not self_type_name:
return sig
full_name = v.official_name or v.name
if not re.fullmatch(rf"{full_name}(\[.*\])?", self_type_name):
return None
# Remove any outer class prefixes from the type name.
if "." in full_name:
new_self_type = self_type.Replace(name=v.name)
new_first_param = sig.params[0].Replace(type=new_self_type)
return sig.Replace(params=(new_first_param,) + sig.params[1:])
else:
return sig
if (
isinstance(value, abstract.InterpreterFunction)
and len(value.signature_functions()) > 1
):
# We should never discard overloads in the source code.
signatures = method.signatures
else:
signatures = tuple(
filter(None, (fix(s) for s in method.signatures))
)
if signatures and signatures != method.signatures:
# Filter out calls made from subclasses unless they are the only
# ones recorded; when inferring types for ParentClass.__init__, we
# do not want `self: Union[ParentClass, Subclass]`.
method = method.Replace(signatures=signatures)
method = add_final(method, value)
# TODO(rechen): Removing mutations altogether won't work for generic
# classes. To support those, we'll need to change the mutated type's
# base to the current class, rename aliased type parameters, and
# replace any parameter not in the class or function template with
# its upper value.
methods[name] = method.Visit(visitors.DropMutableParameters())
elif v.is_enum:
if any(
isinstance(enum_member, abstract.Instance)
and enum_member.cls == v
for enum_member in member.data
):
# i.e. if this is an enum that has any enum members, and the current
# member is an enum member.
# In this case, we would normally output:
# class M(enum.Enum):
# A: M
# However, this loses the type of A.value. Instead, annotate members
# with the type of their value. (This is what typeshed does.)
# class M(enum.Enum):
# A: int
enum_member = abstract_utils.get_atomic_value(member)
node, attr_var = self.ctx.attribute_handler.get_attribute(
node, enum_member, "value"
)
attr = abstract_utils.get_atomic_value(attr_var)
with self.set_output_mode(Converter.OutputMode.LITERAL):
constants[name].add_type(attr.to_pytd_type(node))
else:
# i.e. this is an enum, and the current member is NOT an enum
# member. Which means it's a ClassVar.
cls_member = abstract_utils.get_atomic_value(member)
constants[name].add_type(
pytd.GenericType(
base_type=pytd.NamedType("typing.ClassVar"),
parameters=((cls_member.to_pytd_type(node),)),
)
)
else:
cls = self.ctx.convert.merge_classes([value])
node, attr = self.ctx.attribute_handler.get_attribute(
node, cls, "__get__"
)
if attr:
# This attribute is a descriptor. Its type is the return value of
# its __get__ method.
for typ in self._function_to_return_types(node, attr):
constants[name].add_type(typ)
else:
constants[name].add_type(value.to_pytd_type(node))
# Instance-level attributes: all attributes from 'canonical' instances (that
# is, ones created by analyze.py:analyze_class()) are added. Attributes from
# non-canonical instances are added if their canonical values do not contain
# type parameters.
ignore = set(annotated_names)
# enums should not print "name" and "value" for instances.
if v.is_enum:
ignore.update(("name", "_name_", "value", "_value_"))
canonical_attributes = set()
def add_attributes_from(instance):
for name, member in instance.members.items():
if name in abstract_utils.CLASS_LEVEL_IGNORE or name in ignore:
continue
for value in member.FilteredData(self.ctx.exitpoint, strict=False):
typ = value.to_pytd_type(node)
if pytd_utils.GetTypeParameters(typ):
# This attribute's type comes from an annotation that contains a
# type parameter; we do not want to merge in substituted values of
# the type parameter.
canonical_attributes.add(name)
if v.is_enum:
# If the containing class (v) is an enum, then output the instance
# attributes as properties.
# https://typing.readthedocs.io/en/latest/stubs.html#enums
typ = pytd.Annotated(typ, ("'property'",))
constants[name].add_type(typ)
for instance in v.canonical_instances:
add_attributes_from(instance)
ignore |= canonical_attributes
for instance in v.instances - v.canonical_instances:
add_attributes_from(instance)
for name in list(methods):
if name in constants:
# If something is both a constant and a method, it means that the class
# is, at some point, overwriting its own methods with an attribute.
del methods[name]
constants[name].add_type(pytd.AnythingType())
if isinstance(v, named_tuple.NamedTupleClass):
# Filter out generated members from namedtuples
cls_bases = v.props.bases
fieldnames = [x.name for x in v.props.fields]
methods = {
k: m for k, m in methods.items() if k not in v.generated_members
}
constants = {
k: c for k, c in constants.items() if k not in v.generated_members
}
for k, c in constants.items():
# If we have added class members to a namedtuple, do not emit them as
# regular fields.
if k not in fieldnames:
c.wrap("typing.ClassVar")
slots = None
else:
cls_bases = v.bases()
slots = v.slots
metaclass = v.metaclass(node)
if metaclass is None:
keywords = ()
else:
metaclass = metaclass.to_pytd_type_of_instance(node)
keywords = (("metaclass", metaclass),)
# Some of the class's bases may not be in global scope, so they won't show
# up in the output. In that case, fold the base class's type information
# into this class's pytd.
bases = []
missing_bases = []
for basevar in cls_bases:
if len(basevar.bindings) == 1:
(b,) = basevar.data
if b.official_name is None and isinstance(b, abstract.InterpreterClass):
missing_bases.append(b)
else:
bases.append(b.to_pytd_type_of_instance(node))
else:
bases.append(
pytd_utils.JoinTypes(
b.to_pytd_type_of_instance(node) for b in basevar.data
)
)
# If a namedtuple was constructed via one of the functional forms, it will
# not have a base class. Since we uniformly output all namedtuple classes as
# subclasses of typing.NamedTuple we need to add it in here.
if isinstance(v, named_tuple.NamedTupleClass):
if not any(x.name == "typing.NamedTuple" for x in bases):
bases.append(pytd.NamedType("typing.NamedTuple"))
has_namedtuple_parent = False
parent_field_names = set()
for x in missing_bases:
if isinstance(x, named_tuple.NamedTupleClass):
has_namedtuple_parent = True
parent_field_names.update(field.name for field in x.props.fields)
if has_namedtuple_parent:
# If inheriting from an anonymous namedtuple, mark all derived class
# constants as ClassVars, otherwise MergeBaseClasses will convert them
# into namedtuple fields.
for k, c in constants.items():
if k not in parent_field_names:
c.wrap("typing.ClassVar")
final_constants = []
skip = set()
if isinstance(v, named_tuple.NamedTupleClass):
# The most precise way to get defaults is to check v.__new__.__defaults__,
# since it's possible for the user to manually set __defaults__. If
# retrieving this attribute fails, we fall back to the defaults set when
# the class was built.
try:
new = abstract_utils.get_atomic_value(
v.members["__new__"], abstract.SignedFunction
)
except abstract_utils.ConversionError:
fields_with_defaults = {f.name for f in v.props.fields if f.default}
else:
fields_with_defaults = set(new.signature.defaults)
elif abstract_utils.is_dataclass(v):
fields = v.metadata["__dataclass_fields__"]
fields_with_defaults = {f.name for f in fields if f.default}
skip.add("__dataclass_fields__")
skip.add("__match_args__")
elif abstract_utils.is_attrs(v):
fields = v.metadata["__attrs_attrs__"]
fields_with_defaults = {f.name for f in fields if f.default}
skip.add("__attrs_attrs__")
else:
fields_with_defaults = set()
for name, builder in constants.items():
if not builder or name in skip:
continue
value = pytd.AnythingType() if name in fields_with_defaults else None
final_constants.append(pytd.Constant(name, builder.build(), value))
cls = pytd.Class(
name=class_name,
keywords=keywords,
bases=tuple(bases),
methods=tuple(methods.values()),
constants=tuple(final_constants),
classes=tuple(classes),
decorators=tuple(decorators),
slots=slots,
template=(),
)
for base in missing_bases:
base_cls = self.value_to_pytd_def(node, base, base.name)
cls = pytd_utils.MergeBaseClass(cls, base_cls)
self._scopes.pop()
return cls
def _type_variable_to_def(self, node, v, name):
constraints = tuple(c.to_pytd_type_of_instance(node) for c in v.constraints)
bound = v.bound and v.bound.to_pytd_type_of_instance(node)
if isinstance(v, abstract.TypeParameter):
return pytd.TypeParameter(name, constraints=constraints, bound=bound)
elif isinstance(v, abstract.ParamSpec):
return pytd.ParamSpec(name, constraints=constraints, bound=bound)
else:
assert False, f"Unexpected type variable type: {type(v)}"
def _typed_dict_to_def(self, node, v, name):
keywords = []
if not v.props.total:
keywords.append(("total", pytd.Literal(False)))
bases = (pytd.NamedType("typing.TypedDict"),)
constants = []
for k, val in v.props.fields.items():
typ = self.value_instance_to_pytd_type(node, val, None, set(), {})
if v.props.total and k not in v.props.required:
typ = pytd.GenericType(pytd.NamedType("typing.NotRequired"), (typ,))
elif not v.props.total and k in v.props.required:
typ = pytd.GenericType(pytd.NamedType("typing.Required"), (typ,))
constants.append(pytd.Constant(k, typ))
return pytd.Class(
name=name,
keywords=tuple(keywords),
bases=bases,
methods=(),
constants=tuple(constants),
classes=(),
decorators=(),
slots=None,
template=(),
)
|
Converter
|
python
|
huggingface__transformers
|
src/transformers/models/exaone4/modeling_exaone4.py
|
{
"start": 15551,
"end": 16135
}
|
class ____(PreTrainedModel):
config: Exaone4Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Exaone4DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Exaone4DecoderLayer,
"attentions": Exaone4Attention,
}
config_class = Exaone4Config
@auto_docstring
|
Exaone4PreTrainedModel
|
python
|
astropy__astropy
|
astropy/io/fits/tests/test_table.py
|
{
"start": 134927,
"end": 154849
}
|
class ____(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as it's unambiguous (where "unambiguous" here is questionable
since Numpy is case insensitive when parsing the format codes. But
their "proper" case is lower-case, so we can accept that. Basically,
actually, any key in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
if recformat == "S":
fitsformat = "0A"
c = fits.Column("TEST", np.dtype(recformat))
assert c.format == fitsformat
c = fits.Column("TEST", recformat)
assert c.format == fitsformat
c = fits.Column("TEST", fitsformat)
assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b" 1. 2. 3."
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_seqence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_membership(self):
"""Tests that the membership operator can be used for `ColDefs`."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
c = fits.Column(name="c", format="D")
cols = fits.ColDefs([a, b])
# String tests
assert "a" in cols
assert "b" in cols
assert "c" not in cols
# Column tests
assert a in cols
assert b in cols
assert c not in cols
# General case (false)
assert 1 not in cols
assert cols not in cols
assert [a, b] not in cols
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, character_as_bytes=True)
assert np.array_equal(hdu.data["b"], [b"a", b"b", b"c"])
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
def test_invalid_table_array():
# see https://github.com/astropy/astropy/issues/4580
data = np.empty((5, 100), dtype=[("w", ">f8"), ("f", ">f4")])
with pytest.raises(
ValueError,
match=(
r"Input data with shape \(5, 100\) is not a valid "
r"representation of a row-oriented table\."
),
):
fits.BinTableHDU(data, name="DATA")
def test_repr_scaling(tmp_path):
cols = [
fits.Column(name="a", array=np.array([1, 2]), format="I"),
fits.Column(name="b", array=np.array([1, 2]), format="I"),
]
hdu = fits.BinTableHDU.from_columns(cols)
hdu.header["TSCAL2"] = 0.1
hdu.header["TZERO2"] = 10
hdu.writeto(tmp_path / "test.fits")
data = fits.getdata(tmp_path / "test.fits")
assert repr(data) == (
"FITS_rec([(1, 10.1), (2, 10.2)],\n"
" dtype=(numpy.record, [('a', '>i2'), ('b', '>i2')]))"
)
def test_one_row_string_column(tmp_path):
# Issue #18174 control. One-row table should still read/write normally after zero row fix
data = np.zeros((1, 3), dtype="|S8")
col = fits.Column(name="FOO", format="24A", dim="(8,3)", array=data)
hdul = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([col])])
outfile = tmp_path / "test.fits"
hdul.writeto(outfile)
with fits.open(outfile) as hdul:
table_data = hdul[1].data
assert table_data.shape[0] == 1
def test_zero_row_string_column(tmp_path):
# issue #18174 writing a zero row BinTableHDU with multidimensional string column
data = np.zeros((0, 3), dtype="|S8")
col = fits.Column(name="FOO", format="24A", dim="(8,3)", array=data)
hdul = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([col])])
outfile = tmp_path / "test.fits"
hdul.writeto(outfile)
# re-open and check for zero rows
with fits.open(outfile) as hdul:
table_data = hdul[1].data
assert table_data.shape[0] == 0
|
TestColumnFunctions
|
python
|
keras-team__keras
|
keras/src/backend/tensorflow/layer.py
|
{
"start": 198,
"end": 4334
}
|
class ____(KerasAutoTrackable):
def __init__(self, *args, **kwargs):
# Export-related attributes
self._saved_model_inputs_spec = None
self._saved_model_arg_spec = None
self._tracked = []
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _set_save_spec(self, inputs, args=None, kwargs=None):
"""Defines the save spec so that serialization can trace layer calls.
The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
saved into a tuple of `([inputs] + args, kwargs)`.
Args:
inputs: possibly nested inputs passed into the call function.
args: a list of positional arguments passed into call.
kwargs: a dictionary of keyword arguments passed into call.
"""
if self._saved_model_inputs_spec is not None:
return # Already set.
inputs_spec = tree.map_structure(tf_utils.get_tensor_spec, inputs)
args_spec = tree.map_structure(tf_utils.get_tensor_spec, args or [])
kwargs_spec = {}
# Filter out non-tensor arguments from kwargs.
for key, kwarg in kwargs.items():
flat_kwarg = tree.flatten(kwarg)
flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
if any(s is None for s in flat_specs):
continue
kwargs_spec[key] = tree.pack_sequence_as(kwarg, flat_specs)
self._saved_model_inputs_spec = inputs_spec
self._saved_model_arg_spec = (
[inputs_spec] + list(args_spec),
kwargs_spec,
)
def _trackable_children(self, save_type="checkpoint", **kwargs):
if save_type == "savedmodel":
# SavedModel needs to ignore the execution functions.
train_function = getattr(self, "train_function", None)
test_function = getattr(self, "test_function", None)
predict_function = getattr(self, "predict_function", None)
self.train_function = None
self.test_function = None
self.predict_function = None
children = super()._trackable_children(save_type, **kwargs)
if save_type == "savedmodel":
self.train_function = train_function
self.test_function = test_function
self.predict_function = predict_function
for tracked_attr in self._tracked:
tracked_item = getattr(self, tracked_attr)
if isinstance(tracked_item, tracking.TrackedList):
children[tracked_attr] = list(tracked_item)
if isinstance(tracked_item, tracking.TrackedDict):
children[tracked_attr] = dict(tracked_item)
if isinstance(tracked_item, tracking.TrackedSet):
children[tracked_attr] = list(tracked_item)
return children
@property
def _default_save_signature(self):
"""For SavedModel support: returns the default serving signature."""
from keras.src.models.functional import Functional
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
if not isinstance(self, Model):
return None
inputs = None
if (
isinstance(self, Sequential)
and getattr(self, "_functional", None) is not None
):
inputs = self._functional.input
elif isinstance(self, Functional):
inputs = self.input
if inputs is not None:
input_signature = (
tree.map_structure(
lambda x: tf.TensorSpec(x.shape, x.dtype), inputs
),
)
else:
input_signature = tuple(
tree.map_shape_structure(
lambda s: tf.TensorSpec(s, self.input_dtype), value
)
for value in self._build_shapes_dict.values()
)
@tf.function(input_signature=input_signature)
def serving_default(inputs):
return self(inputs)
return serving_default
|
TFLayer
|
python
|
spack__spack
|
lib/spack/spack/ci/generator_registry.py
|
{
"start": 803,
"end": 974
}
|
class ____(spack.error.SpackError):
def __init__(self, generator_name):
super().__init__(f"No registered generator for {generator_name}")
|
UnknownGeneratorException
|
python
|
openai__openai-python
|
src/openai/types/beta/threads/message_create_params.py
|
{
"start": 1891,
"end": 2083
}
|
class ____(TypedDict, total=False):
file_id: str
"""The ID of the file to attach to the message."""
tools: Iterable[AttachmentTool]
"""The tools to add this file to."""
|
Attachment
|
python
|
numba__numba
|
numba/pycc/compiler.py
|
{
"start": 1700,
"end": 13204
}
|
class ____(object):
"""A base class to compile Python modules to a single shared library or
extension module.
:param export_entries: a list of ExportEntry instances.
:param module_name: the name of the exported module.
"""
#: Structure used to describe a method of an extension type.
#: struct PyMethodDef {
#: const char *ml_name; /* The name of the built-in function/method */
#: PyCFunction ml_meth; /* The C function that implements it */
#: int ml_flags; /* Combination of METH_xxx flags, which mostly
#: describe the args expected by the C func */
#: const char *ml_doc; /* The __doc__ attribute, or NULL */
#: };
method_def_ty = ir.LiteralStructType((lt._int8_star,
lt._void_star,
lt._int32,
lt._int8_star))
method_def_ptr = ir.PointerType(method_def_ty)
# The structure type constructed by PythonAPI.serialize_uncached()
# when updating this, also make sure to update `env_def_t` struct in
# numba/pycc/modulemixin.c
env_def_ty = ir.LiteralStructType((lt._void_star,
lt._int32,
lt._void_star,
lt._void_star,
lt._int32))
env_def_ptr = ir.PointerType(env_def_ty)
def __init__(self, export_entries, module_name, use_nrt=False,
**aot_options):
self.module_name = module_name
self.export_python_wrap = False
self.dll_exports = []
self.export_entries = export_entries
# Used by the CC API but not the legacy API
self.external_init_function = None
self.use_nrt = use_nrt
self.typing_context = cpu_target.typing_context
self.context = cpu_target.target_context.with_aot_codegen(
self.module_name, **aot_options)
def _mangle_method_symbol(self, func_name):
return "._pycc_method_%s" % (func_name,)
def _emit_python_wrapper(self, llvm_module):
"""Emit generated Python wrapper and extension module code.
"""
raise NotImplementedError
@global_compiler_lock
def _cull_exports(self):
"""Read all the exported functions/modules in the translator
environment, and join them into a single LLVM module.
"""
self.exported_function_types = {}
self.function_environments = {}
self.environment_gvs = {}
self.extra_environments = {}
codegen = self.context.codegen()
library = codegen.create_library(self.module_name)
# Generate IR for all exported functions
flags = Flags()
flags.no_compile = True
if not self.export_python_wrap:
flags.no_cpython_wrapper = True
flags.no_cfunc_wrapper = True
if self.use_nrt:
flags.nrt = True
# Compile NRT helpers
nrt_module, _ = nrtdynmod.create_nrt_module(self.context)
library.add_ir_module(nrt_module)
for entry in self.export_entries:
cres = compile_extra(self.typing_context, self.context,
entry.function,
entry.signature.args,
entry.signature.return_type, flags,
locals={}, library=library)
# Fix up dynamic exc globals
module = library._final_module
for gv in module.functions:
if gv.name.startswith("__excinfo_unwrap_args"):
gv.linkage = "linkonce_odr"
func_name = cres.fndesc.llvm_func_name
llvm_func = cres.library.get_function(func_name)
if self.export_python_wrap:
llvm_func.linkage = 'internal'
wrappername = cres.fndesc.llvm_cpython_wrapper_name
wrapper = cres.library.get_function(wrappername)
wrapper.name = self._mangle_method_symbol(entry.symbol)
wrapper.linkage = 'external'
fnty = cres.target_context.call_conv.get_function_type(
cres.fndesc.restype, cres.fndesc.argtypes)
self.exported_function_types[entry] = fnty
self.function_environments[entry] = cres.environment
self.environment_gvs[entry] = cres.fndesc.env_name
# Search for extra environments from linked libraries
for linkedlib in library._linking_libraries:
linkedmod = linkedlib._final_module
# Find environments
for gv in linkedmod.global_variables:
gvn = gv.name
if gvn.startswith("_ZN08NumbaEnv"):
env = lookup_environment(gvn)
if env is not None:
if env.can_cache():
self.extra_environments[gvn] = env
else:
llvm_func.name = entry.symbol
self.dll_exports.append(entry.symbol)
if self.export_python_wrap:
wrapper_module = library.create_ir_module("wrapper")
self._emit_python_wrapper(wrapper_module)
library.add_ir_module(wrapper_module)
# Hide all functions in the DLL except those explicitly exported
library.finalize()
for fn in library.get_defined_functions():
if fn.name not in self.dll_exports:
if fn.linkage in {Linkage.private, Linkage.internal}:
# Private/Internal linkage must have "default" visibility
fn.visibility = "default"
else:
fn.visibility = 'hidden'
return library
def write_llvm_bitcode(self, output, wrap=False, **kws):
self.export_python_wrap = wrap
library = self._cull_exports()
with open(output, 'wb') as fout:
fout.write(library.emit_bitcode())
def write_native_object(self, output, wrap=False, **kws):
self.export_python_wrap = wrap
library = self._cull_exports()
with open(output, 'wb') as fout:
fout.write(library.emit_native_object())
def emit_type(self, tyobj):
ret_val = str(tyobj)
if 'int' in ret_val:
if ret_val.endswith(('8', '16', '32', '64')):
ret_val += "_t"
return ret_val
def emit_header(self, output):
fname, ext = os.path.splitext(output)
with open(fname + '.h', 'w') as fout:
fout.write(get_header())
fout.write("\n/* Prototypes */\n")
for export_entry in self.export_entries:
name = export_entry.symbol
restype = self.emit_type(export_entry.signature.return_type)
args = ", ".join(self.emit_type(argtype)
for argtype in export_entry.signature.args)
fout.write("extern %s %s(%s);\n" % (restype, name, args))
def _emit_method_array(self, llvm_module):
"""
Collect exported methods and emit a PyMethodDef array.
:returns: a pointer to the PyMethodDef array.
"""
method_defs = []
for entry in self.export_entries:
name = entry.symbol
llvm_func_name = self._mangle_method_symbol(name)
fnty = self.exported_function_types[entry]
lfunc = ir.Function(llvm_module, fnty, llvm_func_name)
method_name = self.context.insert_const_string(llvm_module, name)
method_def_const = ir.Constant.literal_struct(
(method_name,
ir.Constant.bitcast(lfunc, lt._void_star),
METH_VARARGS_AND_KEYWORDS,
NULL))
method_defs.append(method_def_const)
sentinel = ir.Constant.literal_struct([NULL, NULL, ZERO, NULL])
method_defs.append(sentinel)
method_array_init = create_constant_array(self.method_def_ty, method_defs)
method_array = cgutils.add_global_variable(llvm_module,
method_array_init.type,
'.module_methods')
method_array.initializer = method_array_init
method_array.linkage = 'internal'
method_array_ptr = ir.Constant.gep(method_array, [ZERO, ZERO])
return method_array_ptr
def _emit_environment_array(self, llvm_module, builder, pyapi):
"""
Emit an array of env_def_t structures (see modulemixin.c)
storing the pickled environment constants for each of the
exported functions.
"""
env_defs = []
for entry in self.export_entries:
env = self.function_environments[entry]
# Constants may be unhashable so avoid trying to cache them
env_def = pyapi.serialize_uncached(env.consts)
env_defs.append(env_def)
# Append extra environments
env_defs.extend([
pyapi.serialize_uncached(env.consts)
for env in self.extra_environments.values()
])
env_defs_init = create_constant_array(self.env_def_ty, env_defs)
gv = self.context.insert_unique_const(llvm_module,
'.module_environments',
env_defs_init)
return gv.gep([ZERO, ZERO])
def _emit_envgvs_array(self, llvm_module, builder, pyapi):
"""
Emit an array of Environment pointers that needs to be filled at
initialization.
"""
env_setters = []
for entry in self.export_entries:
envgv_name = self.environment_gvs[entry]
gv = self.context.declare_env_global(llvm_module, envgv_name)
envgv = gv.bitcast(lt._void_star)
env_setters.append(envgv)
# Append extra environments
env_setters.extend([
self.context.declare_env_global(
llvm_module,
envgv_name
).bitcast(lt._void_star)
for envgv_name in self.extra_environments
])
# The array ends with NULL
env_setters.append(lt._void_star(None))
env_setters_init = create_constant_array(lt._void_star, env_setters)
gv = self.context.insert_unique_const(llvm_module,
'.module_envgvs',
env_setters_init)
return gv.gep([ZERO, ZERO])
def _emit_module_init_code(self, llvm_module, builder, modobj,
method_array, env_array, envgv_array):
"""
Emit call to "external" init function, if any.
"""
if self.external_init_function:
fnty = ir.FunctionType(lt._int32,
[modobj.type, self.method_def_ptr,
self.env_def_ptr, envgv_array.type])
fn = ir.Function(llvm_module, fnty, self.external_init_function)
return builder.call(fn, [modobj, method_array, env_array,
envgv_array])
else:
return None
|
_ModuleCompiler
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/tensor_to_test.py
|
{
"start": 1276,
"end": 1956
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype_one, dtype_two, device):
self.inputs = {
"input": torch.rand(
M, N, device=device, requires_grad=False, dtype=torch.float
).to(dtype=dtype_one)
}
self.dtype_one = dtype_one
self.dtype_two = dtype_two
def forward(self, input):
return input.to(dtype=self.dtype_two)
op_bench.generate_pt_test(tensor_conversion_short_configs, TensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, TensorConversionBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
TensorConversionBenchmark
|
python
|
pytorch__pytorch
|
test/onnx/exporter/test_verification.py
|
{
"start": 3305,
"end": 4180
}
|
class ____(common_utils.TestCase):
def test_interpreter_stores_correct_info(self):
class Model(torch.nn.Module):
def forward(self, a, b):
c = a + b
return c - 1
model = Model()
args = (torch.tensor([1.0]), torch.tensor([2.0]))
onnx_program = torch.onnx.export(model, args, dynamo=True, verbose=False)
assert onnx_program is not None
interpreter = _verification._VerificationInterpreter(onnx_program)
results = interpreter.run(args)
torch.testing.assert_close(results, model(*args))
verification_infos = interpreter.verification_infos
self.assertEqual(len(verification_infos), 3)
for info in verification_infos:
self.assertEqual(info.max_abs_diff, 0)
self.assertEqual(info.max_rel_diff, 0)
|
VerificationInterpreterTest
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/argparsing/__init__.py
|
{
"start": 523,
"end": 2740
}
|
class ____(OptionCompletionFinder):
"""
Custom option completion finder for argcomplete which allows completion results to be registered.
These registered completions, if provided, are used to filter the final completion results.
This works around a known bug: https://github.com/kislyuk/argcomplete/issues/221
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.registered_completions: t.Optional[list[str]] = None
def completer(
self,
prefix: str,
action: argparse.Action,
parsed_args: argparse.Namespace,
**kwargs,
) -> list[str]:
"""
Return a list of completions for the specified prefix and action.
Use this as the completer function for argcomplete.
"""
kwargs.clear()
del kwargs
completions = self.get_completions(prefix, action, parsed_args)
if action.nargs and not isinstance(action.nargs, int):
# prevent argcomplete from including unrelated arguments in the completion results
self.registered_completions = completions
return completions
@abc.abstractmethod
def get_completions(
self,
prefix: str,
action: argparse.Action,
parsed_args: argparse.Namespace,
) -> list[str]:
"""
Return a list of completions for the specified prefix and action.
Called by the complete function.
"""
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""Modify completion results before returning them."""
if self.registered_completions is not None:
# If one of the completion handlers registered their results, only allow those exact results to be returned.
# This prevents argcomplete from adding results from other completers when they are known to be invalid.
allowed_completions = set(self.registered_completions)
completions = [completion for completion in completions if completion in allowed_completions]
return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
|
RegisteredCompletionFinder
|
python
|
streamlit__streamlit
|
lib/streamlit/web/server/routes.py
|
{
"start": 4301,
"end": 4426
}
|
class ____(tornado.web.RequestHandler):
@tornado.web.removeslash
def get(self) -> None:
pass
|
RemoveSlashHandler
|
python
|
getsentry__sentry
|
src/sentry/utils/locking/backends/redis.py
|
{
"start": 3038,
"end": 3658
}
|
class ____(BaseRedisLockBackend):
cluster: RedisCluster[str] | StrictRedis[str]
def __init__(
self,
cluster: str | RedisCluster[str] | StrictRedis[str],
prefix: str = "l:",
uuid: str | None = None,
):
if isinstance(cluster, str):
cluster = redis.redis_clusters.get(cluster)
super().__init__(cluster, prefix=prefix, uuid=uuid)
def get_client(
self, key: str, routing_key: int | str | None = None
) -> RedisCluster[str] | StrictRedis[str]:
return self.cluster
RedisLockBackend = RedisBlasterLockBackend
|
RedisClusterLockBackend
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/timedeltas/test_indexing.py
|
{
"start": 4225,
"end": 6001
}
|
class ____:
def test_where_doesnt_retain_freq(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = [True, True, False]
expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx")
result = tdi.where(cond, tdi[::-1])
tm.assert_index_equal(result, expected)
def test_where_invalid_dtypes(self, fixed_now_ts):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
tail = tdi[2:].tolist()
i2 = Index([NaT, NaT] + tail)
mask = notna(i2)
expected = Index([NaT._value, NaT._value] + tail, dtype=object, name="idx")
assert isinstance(expected[0], int)
result = tdi.where(mask, i2.asi8)
tm.assert_index_equal(result, expected)
ts = i2 + fixed_now_ts
expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
per = (i2 + fixed_now_ts).to_period("D")
expected = Index([per[0], per[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, per)
tm.assert_index_equal(result, expected)
ts = fixed_now_ts
expected = Index([ts, ts] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
def test_where_mismatched_nat(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = np.array([True, False, False])
dtnat = np.datetime64("NaT", "ns")
expected = Index([tdi[0], dtnat, dtnat], dtype=object, name="idx")
assert expected[2] is dtnat
result = tdi.where(cond, dtnat)
tm.assert_index_equal(result, expected)
|
TestWhere
|
python
|
fluentpython__example-code
|
attic/dicts/test_transformdict.py
|
{
"start": 9317,
"end": 9366
}
|
class ____(TransformDict):
pass
|
MyTransformDict
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/workers.py
|
{
"start": 29519,
"end": 32439
}
|
class ____(NonStrictDataModel):
"""
:param id: Worker ID
:type id: str
:param name: Worker name
:type name: str
:param next_task: Next task in the queue
:type next_task: IdNameEntry
:param num_tasks: Number of task entries in the queue
:type num_tasks: int
"""
_schema = {
"properties": {
"id": {"description": "Worker ID", "type": ["string", "null"]},
"name": {"description": "Worker name", "type": ["string", "null"]},
"next_task": {
"description": "Next task in the queue",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
"num_tasks": {
"description": "Number of task entries in the queue",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
next_task: Any = None,
num_tasks: Optional[int] = None,
**kwargs: Any
) -> None:
super(QueueEntry, self).__init__(**kwargs)
self.id = id
self.name = name
self.next_task = next_task
self.num_tasks = num_tasks
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("next_task")
def next_task(self) -> Any:
return self._property_next_task
@next_task.setter
def next_task(self, value: Any) -> None:
if value is None:
self._property_next_task = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "next_task", IdNameEntry)
self._property_next_task = value
@schema_property("num_tasks")
def num_tasks(self) -> Optional[int]:
return self._property_num_tasks
@num_tasks.setter
def num_tasks(self, value: Optional[int]) -> None:
if value is None:
self._property_num_tasks = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "num_tasks", six.integer_types)
self._property_num_tasks = value
|
QueueEntry
|
python
|
getsentry__sentry
|
src/sentry/relay/types/generic_filters.py
|
{
"start": 111,
"end": 282
}
|
class ____(TypedDict):
"""Configuration for a generic filter that filters incoming events."""
id: str
isEnabled: bool
condition: RuleCondition
|
GenericFilter
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-ray/prefect_ray/context.py
|
{
"start": 214,
"end": 2014
}
|
class ____(ContextModel):
"""
The context for Ray remote_options management.
Attributes:
current_remote_options: A set of current remote_options in the context.
"""
__var__: ContextVar = ContextVar("remote_options")
current_remote_options: Dict[str, Any] = Field(default_factory=dict)
@classmethod
def get(cls) -> "RemoteOptionsContext":
"""
Return an empty `RemoteOptionsContext`
instead of `None` if no context exists.
"""
return cls.__var__.get(RemoteOptionsContext())
__var__ = ContextVar("remote_options")
@contextmanager
def remote_options(
**new_remote_options: Dict[str, Any],
) -> Generator[None, Dict[str, Any], None]:
"""
Context manager to add keyword arguments to Ray `@remote` calls
for task runs. If contexts are nested, new options are merged with options
in the outer context. If a key is present in both, the new option will be used.
Yields:
The current set of remote options.
Examples:
Use 4 CPUs and 2 GPUs for the `process` task:
```python
from prefect import flow, task
from prefect_ray.task_runners import RayTaskRunner
from prefect_ray.context import remote_options
@task
def process(x):
return x + 1
@flow(task_runner=RayTaskRunner())
def my_flow():
# equivalent to setting @ray.remote(num_cpus=4, num_gpus=2)
with remote_options(num_cpus=4, num_gpus=2):
process.submit(42)
```
"""
current_remote_options = RemoteOptionsContext.get().current_remote_options
with RemoteOptionsContext(
current_remote_options={**current_remote_options, **new_remote_options}
):
yield
|
RemoteOptionsContext
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/views/private.py
|
{
"start": 39631,
"end": 42542
}
|
class ____(ProjectAdminMixin, PrivateViewMixin, TemplateView):
template_name = "projects/projects_search_analytics.html"
http_method_names = ["get"]
feature_type = TYPE_SEARCH_ANALYTICS
def get(self, request, *args, **kwargs):
download_data = request.GET.get("download", False)
if download_data:
return self._get_csv_data()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
project = self.get_project()
enabled = bool(self._get_feature(project))
context.update({"enabled": enabled})
if not enabled:
return context
# data for plotting the line-chart
query_count_of_1_month = SearchQuery.generate_queries_count_of_one_month(
project.slug,
)
queries = []
qs = SearchQuery.objects.filter(project=project)
if qs.exists():
qs = (
qs.values("query")
.annotate(count=Count("id"))
.order_by("-count", "query")
.values_list("query", "count", "total_results")
)
# only show top 100 queries
queries = qs[:100]
context.update(
{
"queries": queries,
"query_count_of_1_month": query_count_of_1_month,
},
)
return context
def _get_csv_data(self):
"""Generate raw csv data of search queries."""
project = self.get_project()
now = timezone.now().date()
feature = self._get_feature(project)
if not feature:
raise Http404
if feature.unlimited:
days_ago = project.pub_date.date()
else:
days_ago = now - timezone.timedelta(days=feature.value)
values = [
("Created Date", "created"),
("Query", "query"),
("Total Results", "total_results"),
]
data = (
SearchQuery.objects.filter(
project=project,
created__date__gte=days_ago,
)
.order_by("-created")
.values_list(*[value for _, value in values])
)
filename = "readthedocs_search_analytics_{project_slug}_{start}_{end}.csv".format(
project_slug=project.slug,
start=timezone.datetime.strftime(days_ago, "%Y-%m-%d"),
end=timezone.datetime.strftime(now, "%Y-%m-%d"),
)
csv_data = [
[timezone.datetime.strftime(date, "%Y-%m-%d %H:%M:%S"), *rest] for date, *rest in data
]
csv_data.insert(0, [header for header, _ in values])
return get_csv_file(filename=filename, csv_data=csv_data)
def _get_feature(self, project):
return get_feature(project, feature_type=self.feature_type)
|
SearchAnalytics
|
python
|
huggingface__transformers
|
src/transformers/models/xlstm/modeling_xlstm.py
|
{
"start": 52982,
"end": 55560
}
|
class ____:
"""
Cache for xLSTM model which does not have attention mechanism and key value states.
Arguments:
config (`PreTrainedConfig):
The configuration file defining the shape-related attributes required to initialize the static cache.
max_batch_size (`int`):
The batch size with which the model will be used.
dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`):
The default `dtype` to use when initializing the layer.
device (`torch.device` or `str`, *optional*):
The device on which the cache should be initialized. Should be the same as the layer.
Attributes:
seqlen_offset: int
dtype: torch.dtype
Example:
```python
>>> from transformers import AutoTokenizer, xLSTMForCausalLM, xLSTMCache
>>> model = xLSTMForCausalLM.from_pretrained("NX-AI/xLSTM-7b")
>>> tokenizer = xLSTMTokenizer.from_pretrained("NX-AI/xLSTM-7b")
>>> inputs = tokenizer(text="I am an xLSTM", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> cache_params = xLSTMCache(config=model.config, max_batch_size=1, device=model.device, dtype=model.dtype)
>>> outputs = model(**inputs, cache_params=cache_params, use_cache=True)
>>> outputs.cache_params
xLSTMCache()
"""
def __init__(
self,
config: xLSTMConfig,
max_batch_size: int,
dtype: torch.dtype = torch.bfloat16,
device: Optional[str] = None,
**kwargs,
):
self.seqlen_offset = 0
self.dtype = dtype
self.config = config
self.rnn_state = {
layer: (
torch.zeros(
[max_batch_size, config.num_heads, config.qk_head_dim, config.v_head_dim],
dtype=dtype,
device=device,
),
torch.zeros([max_batch_size, config.num_heads, config.qk_head_dim], dtype=dtype, device=device),
torch.zeros([max_batch_size, config.num_heads, 1], dtype=dtype, device=device),
)
for layer in range(config.num_hidden_layers)
}
def reset(self):
self.rnn_state = {
layer: (
torch.zeros_like(self.rnn_state[layer][0]),
torch.zeros_like(self.rnn_state[layer][1]),
torch.zeros_like(self.rnn_state[layer][2]),
)
for layer in self.rnn_state
}
@dataclass
@auto_docstring
|
xLSTMCache
|
python
|
keras-team__keras
|
keras/src/regularizers/regularizers.py
|
{
"start": 7167,
"end": 7945
}
|
class ____(Regularizer):
"""A regularizer that applies a L1 regularization penalty.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
L1 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l1')
In this case, the default value used is `l1=0.01`.
Arguments:
l1: float, L1 regularization factor.
"""
def __init__(self, l1=0.01):
l1 = 0.01 if l1 is None else l1
validate_float_arg(l1, name="l1")
self.l1 = ops.convert_to_tensor(l1)
def __call__(self, x):
return self.l1 * ops.sum(ops.absolute(x))
def get_config(self):
return {"l1": float(self.l1)}
@keras_export(["keras.regularizers.L2", "keras.regularizers.l2"])
|
L1
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-hardcoded-records/source_hardcoded_records/streams.py
|
{
"start": 834,
"end": 3601
}
|
class ____(HardcodedStream):
sample_record = {
"id": 6569096478909,
"email": "test@test.com",
"created_at": "2023-04-13T02:30:04-07:00",
"updated_at": "2023-04-24T06:53:48-07:00",
"first_name": "New Test",
"last_name": "Customer",
"orders_count": 0,
"state": "disabled",
"total_spent": 0.0,
"last_order_id": None,
"note": "updated_mon_24.04.2023",
"verified_email": True,
"multipass_identifier": None,
"tax_exempt": False,
"tags": "",
"last_order_name": None,
"currency": "USD",
"phone": "+380639379992",
"addresses": [
{
"id": 8092523135165,
"customer_id": 6569096478909,
"first_name": "New Test",
"last_name": "Customer",
"company": "Test Company",
"address1": "My Best Accent",
"address2": "",
"city": "Fair Lawn",
"province": "New Jersey",
"country": "United States",
"zip": "07410",
"phone": "",
"name": "New Test Customer",
"province_code": "NJ",
"country_code": "US",
"country_name": "United States",
"default": True,
}
],
"accepts_marketing": True,
"accepts_marketing_updated_at": "2023-04-13T02:30:04-07:00",
"marketing_opt_in_level": "single_opt_in",
"tax_exemptions": "[]",
"email_marketing_consent": {
"state": "subscribed",
"opt_in_level": "single_opt_in",
"consent_updated_at": "2023-04-13T02:30:04-07:00",
},
"sms_marketing_consent": {
"state": "not_subscribed",
"opt_in_level": "single_opt_in",
"consent_updated_at": None,
"consent_collected_from": "SHOPIFY",
},
"admin_graphql_api_id": "gid://shopify/Customer/6569096478909",
"default_address": {
"id": 8092523135165,
"customer_id": 6569096478909,
"first_name": "New Test",
"last_name": "Customer",
"company": "Test Company",
"address1": "My Best Accent",
"address2": "",
"city": "Fair Lawn",
"province": "New Jersey",
"country": "United States",
"zip": "07410",
"phone": "",
"name": "New Test Customer",
"province_code": "NJ",
"country_code": "US",
"country_name": "United States",
"default": True,
},
"shop_url": "airbyte-integration-test",
}
|
Customers
|
python
|
paramiko__paramiko
|
tests/pkey.py
|
{
"start": 324,
"end": 9521
}
|
class ____:
# NOTE: this is incidentally tested by a number of other tests, such as the
# agent.py test suite
class from_type_string:
def loads_from_type_and_bytes(self, keys):
obj = PKey.from_type_string(keys.full_type, keys.pkey.asbytes())
assert obj == keys.pkey
# TODO: exceptions
#
# TODO: passphrase? OTOH since this is aimed at the agent...irrelephant
class from_path:
def loads_from_Path(self, keys):
obj = PKey.from_path(keys.path)
assert obj == keys.pkey
def loads_from_str(self):
key = PKey.from_path(str(_support("rsa.key")))
assert isinstance(key, RSAKey)
@patch("paramiko.pkey.Path")
def expands_user(self, mPath):
# real key for guts that want a real key format
mykey = Path(_support("rsa.key"))
pathy = mPath.return_value.expanduser.return_value
# read_bytes for cryptography.io's loaders
pathy.read_bytes.return_value = mykey.read_bytes()
# open() for our own class loader
pathy.open.return_value = mykey.open()
# fake out exists() to avoid attempts to load cert
pathy.exists.return_value = False
PKey.from_path("whatever") # we're not testing expanduser itself
# Both key and cert paths
mPath.return_value.expanduser.assert_has_calls([call(), call()])
def raises_UnknownKeyType_for_unknown_types(self):
# I.e. a real, becomes a useful object via cryptography.io, key
# class that we do NOT support. Chose Ed448 randomly as OpenSSH
# doesn't seem to support it either, going by ssh-keygen...
keypath = _support("ed448.key")
with raises(UnknownKeyType) as exc:
PKey.from_path(keypath)
assert issubclass(exc.value.key_type, Ed448PrivateKey)
with open(keypath, "rb") as fd:
assert exc.value.key_bytes == fd.read()
def leaves_cryptography_exceptions_untouched(self):
# a Python file is not a private key!
with raises(ValueError):
PKey.from_path(__file__)
# TODO: passphrase support tested
class automatically_loads_certificates:
def existing_cert_loaded_when_given_key_path(self):
key = PKey.from_path(_support("rsa.key"))
# Public blob exists despite no .load_certificate call
assert key.public_blob is not None
assert (
key.public_blob.key_type == "ssh-rsa-cert-v01@openssh.com"
)
# And it's definitely the one we expected
assert key.public_blob == PublicBlob.from_file(
_support("rsa.key-cert.pub")
)
def can_be_given_cert_path_instead(self):
key = PKey.from_path(_support("rsa.key-cert.pub"))
# It's still a key, not a PublicBlob
assert isinstance(key, RSAKey)
# Public blob exists despite no .load_certificate call
assert key.public_blob is not None
assert (
key.public_blob.key_type == "ssh-rsa-cert-v01@openssh.com"
)
# And it's definitely the one we expected
assert key.public_blob == PublicBlob.from_file(
_support("rsa.key-cert.pub")
)
def no_cert_load_if_no_cert(self):
# This key exists (it's a copy of the regular one) but has no
# matching -cert.pub
key = PKey.from_path(_support("rsa-lonely.key"))
assert key.public_blob is None
def excepts_usefully_if_no_key_only_cert(self):
# TODO: is that truly an error condition? the cert is ~the
# pubkey and we still require the privkey for signing, yea?
# This cert exists (it's a copy of the regular one) but there's
# no rsa-missing.key to load.
with raises(FileNotFoundError) as info:
PKey.from_path(_support("rsa-missing.key-cert.pub"))
assert info.value.filename.endswith("rsa-missing.key")
class load_certificate:
def rsa_public_cert_blobs(self):
# Data to test signing with (arbitrary)
data = b"ice weasels"
# Load key w/o cert at first (so avoiding .from_path)
key = RSAKey.from_private_key_file(_support("rsa.key"))
assert key.public_blob is None
# Sign regular-style (using, arbitrarily, SHA2)
msg = key.sign_ssh_data(data, "rsa-sha2-256")
msg.rewind()
assert "rsa-sha2-256" == msg.get_text()
signed = msg.get_binary() # for comparison later
# Load cert and inspect its internals
key.load_certificate(_support("rsa.key-cert.pub"))
assert key.public_blob is not None
assert key.public_blob.key_type == "ssh-rsa-cert-v01@openssh.com"
assert key.public_blob.comment == "test_rsa.key.pub"
msg = Message(key.public_blob.key_blob)
# cert type
assert msg.get_text() == "ssh-rsa-cert-v01@openssh.com"
# nonce
msg.get_string()
# public numbers
assert msg.get_mpint() == key.public_numbers.e
assert msg.get_mpint() == key.public_numbers.n
# serial number
assert msg.get_int64() == 1234
# TODO: whoever wrote the OG tests didn't care about the remaining
# fields from
# https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.certkeys
# so neither do I, for now...
# Sign cert-style (still SHA256 - so this actually does almost
# exactly the same thing under the hood as the previous sign)
msg = key.sign_ssh_data(data, "rsa-sha2-256-cert-v01@openssh.com")
msg.rewind()
assert "rsa-sha2-256" == msg.get_text()
assert signed == msg.get_binary() # same signature as above
msg.rewind()
assert key.verify_ssh_sig(b"ice weasels", msg) # our data verified
def loading_cert_of_different_type_from_key_raises_ValueError(self):
edkey = Ed25519Key.from_private_key_file(_support("ed25519.key"))
err = "PublicBlob type ssh-rsa-cert-v01@openssh.com incompatible with key type ssh-ed25519" # noqa
with raises(ValueError, match=err):
edkey.load_certificate(_support("rsa.key-cert.pub"))
def fingerprint(self, keys):
# NOTE: Hardcoded fingerprint expectation stored in fixture.
assert keys.pkey.fingerprint == keys.expected_fp
def algorithm_name(self, keys):
key = keys.pkey
if isinstance(key, RSAKey):
assert key.algorithm_name == "RSA"
elif isinstance(key, ECDSAKey):
assert key.algorithm_name == "ECDSA"
elif isinstance(key, Ed25519Key):
assert key.algorithm_name == "ED25519"
# TODO: corner case: AgentKey, whose .name can be cert-y (due to the
# value of the name field passed via agent protocol) and thus
# algorithm_name is eg "RSA-CERT" - keys loaded directly from disk will
# never look this way, even if they have a .public_blob attached.
class equality_and_hashing:
def same_key_is_equal_to_itself(self, keys):
assert keys.pkey == keys.pkey2
def same_key_same_hash(self, keys):
# NOTE: this isn't a great test due to hashseed randomization under
# Python 3 preventing use of static values, but it does still prove
# that __hash__ is implemented/doesn't explode & works across
# instances
assert hash(keys.pkey) == hash(keys.pkey2)
def keys_are_not_equal_to_other_types(self, keys):
for value in [None, True, ""]:
assert keys.pkey != value
class identifiers_classmethods:
def default_is_class_name_attribute(self):
# NOTE: not all classes _have_ this, only the ones that don't
# customize identifiers().
class MyKey(PKey):
name = "it me"
assert MyKey.identifiers() == ["it me"]
def rsa_is_all_combos_of_cert_and_sha_type(self):
assert RSAKey.identifiers() == [
"ssh-rsa",
"ssh-rsa-cert-v01@openssh.com",
"rsa-sha2-256",
"rsa-sha2-256-cert-v01@openssh.com",
"rsa-sha2-512",
"rsa-sha2-512-cert-v01@openssh.com",
]
def ed25519_is_protocol_name(self):
assert Ed25519Key.identifiers() == ["ssh-ed25519"]
def ecdsa_is_all_curve_names(self):
assert ECDSAKey.identifiers() == [
"ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521",
]
|
PKey_
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramSpec19.py
|
{
"start": 1111,
"end": 1892
}
|
class ____:
def func1(self, handler: CommandHandler2[P]) -> Command2[P]:
return Command2(handler)
def func2(
self,
handler: CommandHandler2[P],
) -> Callable[[CommandHandler2[P]], Command2[P]]:
def decorator(handler: CommandHandler2[P]) -> Command2[P]:
return self.func1(handler)
return decorator
def handler(arg1: int, arg2: str) -> dict[str, Any]: ...
v1: CommandHandler2 = handler
def func1_1(x: CommandHandler1[str]):
x(3, "hi")
def func1_2(x: CommandHandler1[[str, int]]):
x(3, "hi", 3)
def func2_1(x: CommandHandler2[str]):
x("hi")
def func2_2(x: CommandHandler2[[str, int]]):
x("hi", 3)
HandlerAlias = Callable[P, None]
list_of_handlers: list[HandlerAlias[...]] = []
|
Application2
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/dml/test_evaluator.py
|
{
"start": 12437,
"end": 13938
}
|
class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
class Child(Base):
__tablename__ = "child"
_id_parent = Column(
"id_parent", Integer, ForeignKey(Parent.id), primary_key=True
)
name = Column(String(50), primary_key=True)
parent = relationship(Parent)
def test_delete_not_expired(self):
Parent, Child = self.classes("Parent", "Child")
session = fixture_session(expire_on_commit=False)
p = Parent(id=1)
session.add(p)
session.commit()
c = Child(name="foo", parent=p)
session.add(c)
session.commit()
session.query(Child).filter(Child.parent == p).delete("evaluate")
is_(inspect(c).deleted, True)
def test_delete_expired(self):
Parent, Child = self.classes("Parent", "Child")
session = fixture_session()
p = Parent(id=1)
session.add(p)
session.commit()
c = Child(name="foo", parent=p)
session.add(c)
session.commit()
session.query(Child).filter(Child.parent == p).delete("evaluate")
# because it's expired
is_(inspect(c).deleted, False)
# but it's gone
assert_raises(orm_exc.ObjectDeletedError, lambda: c.name)
|
M2OEvaluateTest
|
python
|
sanic-org__sanic
|
guide/webapp/display/markdown.py
|
{
"start": 4494,
"end": 5543
}
|
class ____(TableOfContents):
def generate_heading_id(self, token, index):
return slugify(token["text"])
RST_CODE_BLOCK_PATTERN = re.compile(
r"\.\.\scode-block::\s(\w+)\n\n((?:\n|(?:\s\s\s\s[^\n]*))+)"
)
_render_markdown = create_markdown(
renderer=DocsRenderer(),
plugins=[
RSTDirective(
[
# Admonition(),
Attributes(),
Notification(),
SanicTableOfContents(),
Column(),
Mermaid(),
Tabs(),
Hook(),
]
),
"abbr",
"def_list",
"footnotes",
"mark",
"table",
span,
inline_directive,
],
)
def render_markdown(text: str) -> str:
def replacer(match):
language = match.group(1)
code_block = dedent(match.group(2)).strip()
return f"```{language}\n{code_block}\n```\n\n"
text = RST_CODE_BLOCK_PATTERN.sub(replacer, text)
return _render_markdown(text)
|
SanicTableOfContents
|
python
|
huggingface__transformers
|
src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py
|
{
"start": 9384,
"end": 9645
}
|
class ____(PreTrainedModel):
config: PromptDepthAnythingConfig
base_model_prefix = "prompt_depth_anything"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
|
PromptDepthAnythingPreTrainedModel
|
python
|
networkx__networkx
|
networkx/classes/tests/test_graphviews.py
|
{
"start": 169,
"end": 1471
}
|
class ____:
def setup_method(self):
self.G = nx.path_graph(9, create_using=nx.DiGraph())
self.rv = nx.reverse_view(self.G)
def test_pickle(self):
import pickle
rv = self.rv
prv = pickle.loads(pickle.dumps(rv, -1))
assert rv._node == prv._node
assert rv._adj == prv._adj
assert rv.graph == prv.graph
def test_contains(self):
assert (2, 3) in self.G.edges
assert (3, 2) not in self.G.edges
assert (2, 3) not in self.rv.edges
assert (3, 2) in self.rv.edges
def test_iter(self):
expected = sorted(tuple(reversed(e)) for e in self.G.edges)
assert sorted(self.rv.edges) == expected
def test_exceptions(self):
G = nx.Graph()
pytest.raises(nx.NetworkXNotImplemented, nx.reverse_view, G)
def test_subclass(self):
class MyGraph(nx.DiGraph):
def my_method(self):
return "me"
def to_directed_class(self):
return MyGraph()
M = MyGraph()
M.add_edge(1, 2)
RM = nx.reverse_view(M)
assert RM.__class__ == MyGraph
RMC = RM.copy()
assert RMC.__class__ == MyGraph
assert RMC.has_edge(2, 1)
assert RMC.my_method() == "me"
|
TestReverseView
|
python
|
mlflow__mlflow
|
mlflow/webhooks/types.py
|
{
"start": 2289,
"end": 3044
}
|
class ____(TypedDict):
"""Payload sent when a tag is set on a model version.
Example payload:
.. code-block:: python
{
"name": "example_model",
"version": "1",
"key": "example_key",
"value": "example_value",
}
"""
name: str
"""The name of the registered model."""
version: str
"""The version of the model."""
key: str
"""The tag key being set."""
value: str
"""The tag value being set."""
@classmethod
def example(cls) -> "ModelVersionTagSetPayload":
return cls(
name="example_model",
version="1",
key="example_key",
value="example_value",
)
|
ModelVersionTagSetPayload
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.