language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | viewflow__viewflow | viewflow/workflow/flow/views/filters.py | {
"start": 611,
"end": 1274
} | class ____(BaseModelChoiceFilter):
def get_queryset(self, request):
queryset = this.resolve(self.parent, self.queryset)
if callable(queryset):
return queryset(request)
return queryset
def get_queryset_flow_task_choices(queryset):
# TODO add Node.task_name/label method
def task_name(flow_task):
return "{}/{}".format(
flow_task.flow_class.process_title, flow_task.name.title()
)
tasks = (
queryset.order_by("flow_task").values_list("flow_task", flat=True).distinct()
)
return [(get_task_ref(flow_task), task_name(flow_task)) for flow_task in tasks]
| ModelChoiceFilter |
python | walkccc__LeetCode | solutions/2674. Split a Circular Linked List/2674.py | {
"start": 0,
"end": 545
} | class ____:
def splitCircularLinkedList(self, list: ListNode | None) -> list[ListNode | None]:
slow = list
fast = list
# Point `slow` to the last node in the first half.
while fast.next != list and fast.next.next != list:
slow = slow.next
fast = fast.next.next
# Circle back the second half.
secondHead = slow.next
if fast.next == list:
fast.next = secondHead
else:
fast.next.next = secondHead
# Circle back the first half.
slow.next = list
return [list, secondHead]
| Solution |
python | paramiko__paramiko | paramiko/auth_strategy.py | {
"start": 1126,
"end": 1340
} | class ____(AuthSource):
"""
Auth type "none", ie https://www.rfc-editor.org/rfc/rfc4252#section-5.2 .
"""
def authenticate(self, transport):
return transport.auth_none(self.username)
| NoneAuth |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_permissions.py | {
"start": 1663,
"end": 1821
} | class ____:
@check_permission("fake_other_permission")
def mutate(self, graphene_info: ResolveInfo, **_kwargs):
pass
| FakeOtherPermissionMutation |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 836384,
"end": 837120
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for PinnedIssue."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PinnedIssueEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("PinnedIssue"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| PinnedIssueConnection |
python | astropy__astropy | astropy/convolution/tests/test_convolve_models.py | {
"start": 407,
"end": 3905
} | class ____:
@pytest.mark.parametrize("mode", [convolve_fft, convolve])
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_is_consistency_with_astropy_convolution(self, mode):
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode.__name__)
x = np.arange(-5, 6)
ans = mode(model(x), kernel(x))
assert_allclose(ans, model_conv(x), atol=1e-5)
@pytest.mark.parametrize("mode", ["convolve_fft", "convolve"])
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_against_scipy(self, mode):
from scipy.signal import fftconvolve
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode)
x = np.arange(-5, 6)
ans = fftconvolve(kernel(x), model(x), mode="same")
assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5)
@pytest.mark.parametrize("mode", ["convolve_fft", "convolve"])
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_against_scipy_with_additional_keywords(self, mode):
from scipy.signal import fftconvolve
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode, normalize_kernel=False)
x = np.arange(-5, 6)
ans = fftconvolve(kernel(x), model(x), mode="same")
assert_allclose(ans, model_conv(x), atol=1e-5)
@pytest.mark.parametrize("mode", ["convolve_fft", "convolve"])
def test_sum_of_gaussians(self, mode):
"""
Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d),
where N is a Gaussian probability density function, in which a
and c are their means and b and d are their variances.
"""
kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1)
model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1)
model_conv = convolve_models(model, kernel, mode=mode, normalize_kernel=False)
ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2))
x = np.arange(-5, 6)
assert_allclose(ans(x), model_conv(x), atol=1e-3)
@pytest.mark.parametrize("mode", ["convolve_fft", "convolve"])
def test_convolve_box_models(self, mode):
kernel = models.Box1D()
model = models.Box1D()
model_conv = convolve_models(model, kernel, mode=mode)
x = np.linspace(-1, 1, 99)
ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0)
assert_allclose(ans, model_conv(x), atol=1e-3)
@pytest.mark.parametrize("mode", ["convolve_fft", "convolve"])
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_fitting_convolve_models(self, mode):
"""
test that a convolve model can be fitted
"""
b1 = models.Box1D()
g1 = models.Gaussian1D()
x = np.linspace(-5, 5, 99)
fake_model = models.Gaussian1D(amplitude=10)
with NumpyRNGContext(123):
fake_data = fake_model(x) + np.random.normal(size=len(x))
init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False)
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, fake_data)
me = np.mean(fitted_model(x) - fake_data)
assert_almost_equal(me, 0.0, decimal=2)
| TestConvolve1DModels |
python | pennersr__django-allauth | allauth/socialaccount/providers/tiktok/client.py | {
"start": 73,
"end": 152
} | class ____(OAuth2Client):
client_id_parameter = "client_key"
| TikTokOAuth2Client |
python | django__django | tests/template_tests/syntax_tests/test_if.py | {
"start": 172,
"end": 28502
} | class ____(SimpleTestCase):
@setup({"if-tag01": "{% if foo %}yes{% else %}no{% endif %}"})
def test_if_tag01(self):
output = self.engine.render_to_string("if-tag01", {"foo": True})
self.assertEqual(output, "yes")
@setup({"if-tag02": "{% if foo %}yes{% else %}no{% endif %}"})
def test_if_tag02(self):
output = self.engine.render_to_string("if-tag02", {"foo": False})
self.assertEqual(output, "no")
@setup({"if-tag03": "{% if foo %}yes{% else %}no{% endif %}"})
def test_if_tag03(self):
output = self.engine.render_to_string("if-tag03")
self.assertEqual(output, "no")
@setup({"if-tag04": "{% if foo %}foo{% elif bar %}bar{% endif %}"})
def test_if_tag04(self):
output = self.engine.render_to_string("if-tag04", {"foo": True})
self.assertEqual(output, "foo")
@setup({"if-tag05": "{% if foo %}foo{% elif bar %}bar{% endif %}"})
def test_if_tag05(self):
output = self.engine.render_to_string("if-tag05", {"bar": True})
self.assertEqual(output, "bar")
@setup({"if-tag06": "{% if foo %}foo{% elif bar %}bar{% endif %}"})
def test_if_tag06(self):
output = self.engine.render_to_string("if-tag06")
self.assertEqual(output, "")
@setup({"if-tag07": "{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}"})
def test_if_tag07(self):
output = self.engine.render_to_string("if-tag07", {"foo": True})
self.assertEqual(output, "foo")
@setup({"if-tag08": "{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}"})
def test_if_tag08(self):
output = self.engine.render_to_string("if-tag08", {"bar": True})
self.assertEqual(output, "bar")
@setup({"if-tag09": "{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}"})
def test_if_tag09(self):
output = self.engine.render_to_string("if-tag09")
self.assertEqual(output, "nothing")
@setup(
{
"if-tag10": (
"{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing"
"{% endif %}"
)
}
)
def test_if_tag10(self):
output = self.engine.render_to_string("if-tag10", {"foo": True})
self.assertEqual(output, "foo")
@setup(
{
"if-tag11": (
"{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing"
"{% endif %}"
)
}
)
def test_if_tag11(self):
output = self.engine.render_to_string("if-tag11", {"bar": True})
self.assertEqual(output, "bar")
@setup(
{
"if-tag12": (
"{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing"
"{% endif %}"
)
}
)
def test_if_tag12(self):
output = self.engine.render_to_string("if-tag12", {"baz": True})
self.assertEqual(output, "baz")
@setup(
{
"if-tag13": (
"{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing"
"{% endif %}"
)
}
)
def test_if_tag13(self):
output = self.engine.render_to_string("if-tag13")
self.assertEqual(output, "nothing")
# Filters
@setup({"if-tag-filter01": "{% if foo|length == 5 %}yes{% else %}no{% endif %}"})
def test_if_tag_filter01(self):
output = self.engine.render_to_string("if-tag-filter01", {"foo": "abcde"})
self.assertEqual(output, "yes")
@setup({"if-tag-filter02": "{% if foo|upper == 'ABC' %}yes{% else %}no{% endif %}"})
def test_if_tag_filter02(self):
output = self.engine.render_to_string("if-tag-filter02")
self.assertEqual(output, "no")
# Equality
@setup({"if-tag-eq01": "{% if foo == bar %}yes{% else %}no{% endif %}"})
def test_if_tag_eq01(self):
output = self.engine.render_to_string("if-tag-eq01")
self.assertEqual(output, "yes")
@setup({"if-tag-eq02": "{% if foo == bar %}yes{% else %}no{% endif %}"})
def test_if_tag_eq02(self):
output = self.engine.render_to_string("if-tag-eq02", {"foo": 1})
self.assertEqual(output, "no")
@setup({"if-tag-eq03": "{% if foo == bar %}yes{% else %}no{% endif %}"})
def test_if_tag_eq03(self):
output = self.engine.render_to_string("if-tag-eq03", {"foo": 1, "bar": 1})
self.assertEqual(output, "yes")
@setup({"if-tag-eq04": "{% if foo == bar %}yes{% else %}no{% endif %}"})
def test_if_tag_eq04(self):
output = self.engine.render_to_string("if-tag-eq04", {"foo": 1, "bar": 2})
self.assertEqual(output, "no")
@setup({"if-tag-eq05": "{% if foo == '' %}yes{% else %}no{% endif %}"})
def test_if_tag_eq05(self):
output = self.engine.render_to_string("if-tag-eq05")
self.assertEqual(output, "no")
# Inequality
@setup({"if-tag-noteq01": "{% if foo != bar %}yes{% else %}no{% endif %}"})
def test_if_tag_noteq01(self):
output = self.engine.render_to_string("if-tag-noteq01")
self.assertEqual(output, "no")
@setup({"if-tag-noteq02": "{% if foo != bar %}yes{% else %}no{% endif %}"})
def test_if_tag_noteq02(self):
output = self.engine.render_to_string("if-tag-noteq02", {"foo": 1})
self.assertEqual(output, "yes")
@setup({"if-tag-noteq03": "{% if foo != bar %}yes{% else %}no{% endif %}"})
def test_if_tag_noteq03(self):
output = self.engine.render_to_string("if-tag-noteq03", {"foo": 1, "bar": 1})
self.assertEqual(output, "no")
@setup({"if-tag-noteq04": "{% if foo != bar %}yes{% else %}no{% endif %}"})
def test_if_tag_noteq04(self):
output = self.engine.render_to_string("if-tag-noteq04", {"foo": 1, "bar": 2})
self.assertEqual(output, "yes")
@setup({"if-tag-noteq05": '{% if foo != "" %}yes{% else %}no{% endif %}'})
def test_if_tag_noteq05(self):
output = self.engine.render_to_string("if-tag-noteq05")
self.assertEqual(output, "yes")
# Comparison
@setup({"if-tag-gt-01": "{% if 2 > 1 %}yes{% else %}no{% endif %}"})
def test_if_tag_gt_01(self):
output = self.engine.render_to_string("if-tag-gt-01")
self.assertEqual(output, "yes")
@setup({"if-tag-gt-02": "{% if 1 > 1 %}yes{% else %}no{% endif %}"})
def test_if_tag_gt_02(self):
output = self.engine.render_to_string("if-tag-gt-02")
self.assertEqual(output, "no")
@setup({"if-tag-gte-01": "{% if 1 >= 1 %}yes{% else %}no{% endif %}"})
def test_if_tag_gte_01(self):
output = self.engine.render_to_string("if-tag-gte-01")
self.assertEqual(output, "yes")
@setup({"if-tag-gte-02": "{% if 1 >= 2 %}yes{% else %}no{% endif %}"})
def test_if_tag_gte_02(self):
output = self.engine.render_to_string("if-tag-gte-02")
self.assertEqual(output, "no")
@setup({"if-tag-lt-01": "{% if 1 < 2 %}yes{% else %}no{% endif %}"})
def test_if_tag_lt_01(self):
output = self.engine.render_to_string("if-tag-lt-01")
self.assertEqual(output, "yes")
@setup({"if-tag-lt-02": "{% if 1 < 1 %}yes{% else %}no{% endif %}"})
def test_if_tag_lt_02(self):
output = self.engine.render_to_string("if-tag-lt-02")
self.assertEqual(output, "no")
@setup({"if-tag-lte-01": "{% if 1 <= 1 %}yes{% else %}no{% endif %}"})
def test_if_tag_lte_01(self):
output = self.engine.render_to_string("if-tag-lte-01")
self.assertEqual(output, "yes")
@setup({"if-tag-lte-02": "{% if 2 <= 1 %}yes{% else %}no{% endif %}"})
def test_if_tag_lte_02(self):
output = self.engine.render_to_string("if-tag-lte-02")
self.assertEqual(output, "no")
# Contains
@setup({"if-tag-in-01": "{% if 1 in x %}yes{% else %}no{% endif %}"})
def test_if_tag_in_01(self):
output = self.engine.render_to_string("if-tag-in-01", {"x": [1]})
self.assertEqual(output, "yes")
@setup({"if-tag-in-02": "{% if 2 in x %}yes{% else %}no{% endif %}"})
def test_if_tag_in_02(self):
output = self.engine.render_to_string("if-tag-in-02", {"x": [1]})
self.assertEqual(output, "no")
@setup({"if-tag-not-in-01": "{% if 1 not in x %}yes{% else %}no{% endif %}"})
def test_if_tag_not_in_01(self):
output = self.engine.render_to_string("if-tag-not-in-01", {"x": [1]})
self.assertEqual(output, "no")
@setup({"if-tag-not-in-02": "{% if 2 not in x %}yes{% else %}no{% endif %}"})
def test_if_tag_not_in_02(self):
output = self.engine.render_to_string("if-tag-not-in-02", {"x": [1]})
self.assertEqual(output, "yes")
# AND
@setup({"if-tag-and01": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and01(self):
output = self.engine.render_to_string(
"if-tag-and01", {"foo": True, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-and02": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and02(self):
output = self.engine.render_to_string(
"if-tag-and02", {"foo": True, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-and03": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and03(self):
output = self.engine.render_to_string(
"if-tag-and03", {"foo": False, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-and04": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and04(self):
output = self.engine.render_to_string(
"if-tag-and04", {"foo": False, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-and05": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and05(self):
output = self.engine.render_to_string("if-tag-and05", {"foo": False})
self.assertEqual(output, "no")
@setup({"if-tag-and06": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and06(self):
output = self.engine.render_to_string("if-tag-and06", {"bar": False})
self.assertEqual(output, "no")
@setup({"if-tag-and07": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and07(self):
output = self.engine.render_to_string("if-tag-and07", {"foo": True})
self.assertEqual(output, "no")
@setup({"if-tag-and08": "{% if foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_and08(self):
output = self.engine.render_to_string("if-tag-and08", {"bar": True})
self.assertEqual(output, "no")
# OR
@setup({"if-tag-or01": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or01(self):
output = self.engine.render_to_string("if-tag-or01", {"foo": True, "bar": True})
self.assertEqual(output, "yes")
@setup({"if-tag-or02": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or02(self):
output = self.engine.render_to_string(
"if-tag-or02", {"foo": True, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-or03": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or03(self):
output = self.engine.render_to_string(
"if-tag-or03", {"foo": False, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-or04": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or04(self):
output = self.engine.render_to_string(
"if-tag-or04", {"foo": False, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-or05": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or05(self):
output = self.engine.render_to_string("if-tag-or05", {"foo": False})
self.assertEqual(output, "no")
@setup({"if-tag-or06": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or06(self):
output = self.engine.render_to_string("if-tag-or06", {"bar": False})
self.assertEqual(output, "no")
@setup({"if-tag-or07": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or07(self):
output = self.engine.render_to_string("if-tag-or07", {"foo": True})
self.assertEqual(output, "yes")
@setup({"if-tag-or08": "{% if foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_or08(self):
output = self.engine.render_to_string("if-tag-or08", {"bar": True})
self.assertEqual(output, "yes")
@setup({"if-tag-or09": "{% if foo or bar or baz %}yes{% else %}no{% endif %}"})
def test_if_tag_or09(self):
"""
multiple ORs
"""
output = self.engine.render_to_string("if-tag-or09", {"baz": True})
self.assertEqual(output, "yes")
# NOT
@setup({"if-tag-not01": "{% if not foo %}no{% else %}yes{% endif %}"})
def test_if_tag_not01(self):
output = self.engine.render_to_string("if-tag-not01", {"foo": True})
self.assertEqual(output, "yes")
@setup({"if-tag-not02": "{% if not not foo %}no{% else %}yes{% endif %}"})
def test_if_tag_not02(self):
output = self.engine.render_to_string("if-tag-not02", {"foo": True})
self.assertEqual(output, "no")
@setup({"if-tag-not06": "{% if foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not06(self):
output = self.engine.render_to_string("if-tag-not06")
self.assertEqual(output, "no")
@setup({"if-tag-not07": "{% if foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not07(self):
output = self.engine.render_to_string(
"if-tag-not07", {"foo": True, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not08": "{% if foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not08(self):
output = self.engine.render_to_string(
"if-tag-not08", {"foo": True, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not09": "{% if foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not09(self):
output = self.engine.render_to_string(
"if-tag-not09", {"foo": False, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not10": "{% if foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not10(self):
output = self.engine.render_to_string(
"if-tag-not10", {"foo": False, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-not11": "{% if not foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not11(self):
output = self.engine.render_to_string("if-tag-not11")
self.assertEqual(output, "no")
@setup({"if-tag-not12": "{% if not foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not12(self):
output = self.engine.render_to_string(
"if-tag-not12", {"foo": True, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not13": "{% if not foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not13(self):
output = self.engine.render_to_string(
"if-tag-not13", {"foo": True, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-not14": "{% if not foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not14(self):
output = self.engine.render_to_string(
"if-tag-not14", {"foo": False, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not15": "{% if not foo and bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not15(self):
output = self.engine.render_to_string(
"if-tag-not15", {"foo": False, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-not16": "{% if foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not16(self):
output = self.engine.render_to_string("if-tag-not16")
self.assertEqual(output, "yes")
@setup({"if-tag-not17": "{% if foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not17(self):
output = self.engine.render_to_string(
"if-tag-not17", {"foo": True, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not18": "{% if foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not18(self):
output = self.engine.render_to_string(
"if-tag-not18", {"foo": True, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not19": "{% if foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not19(self):
output = self.engine.render_to_string(
"if-tag-not19", {"foo": False, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not20": "{% if foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not20(self):
output = self.engine.render_to_string(
"if-tag-not20", {"foo": False, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not21": "{% if not foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not21(self):
output = self.engine.render_to_string("if-tag-not21")
self.assertEqual(output, "yes")
@setup({"if-tag-not22": "{% if not foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not22(self):
output = self.engine.render_to_string(
"if-tag-not22", {"foo": True, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not23": "{% if not foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not23(self):
output = self.engine.render_to_string(
"if-tag-not23", {"foo": True, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-not24": "{% if not foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not24(self):
output = self.engine.render_to_string(
"if-tag-not24", {"foo": False, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not25": "{% if not foo or bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not25(self):
output = self.engine.render_to_string(
"if-tag-not25", {"foo": False, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not26": "{% if not foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not26(self):
output = self.engine.render_to_string("if-tag-not26")
self.assertEqual(output, "yes")
@setup({"if-tag-not27": "{% if not foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not27(self):
output = self.engine.render_to_string(
"if-tag-not27", {"foo": True, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not28": "{% if not foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not28(self):
output = self.engine.render_to_string(
"if-tag-not28", {"foo": True, "bar": False}
)
self.assertEqual(output, "no")
@setup({"if-tag-not29": "{% if not foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not29(self):
output = self.engine.render_to_string(
"if-tag-not29", {"foo": False, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not30": "{% if not foo and not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not30(self):
output = self.engine.render_to_string(
"if-tag-not30", {"foo": False, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not31": "{% if not foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not31(self):
output = self.engine.render_to_string("if-tag-not31")
self.assertEqual(output, "yes")
@setup({"if-tag-not32": "{% if not foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not32(self):
output = self.engine.render_to_string(
"if-tag-not32", {"foo": True, "bar": True}
)
self.assertEqual(output, "no")
@setup({"if-tag-not33": "{% if not foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not33(self):
output = self.engine.render_to_string(
"if-tag-not33", {"foo": True, "bar": False}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not34": "{% if not foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not34(self):
output = self.engine.render_to_string(
"if-tag-not34", {"foo": False, "bar": True}
)
self.assertEqual(output, "yes")
@setup({"if-tag-not35": "{% if not foo or not bar %}yes{% else %}no{% endif %}"})
def test_if_tag_not35(self):
output = self.engine.render_to_string(
"if-tag-not35", {"foo": False, "bar": False}
)
self.assertEqual(output, "yes")
# Various syntax errors
@setup({"if-tag-error01": "{% if %}yes{% endif %}"})
def test_if_tag_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error01")
@setup({"if-tag-error02": "{% if foo and %}yes{% else %}no{% endif %}"})
def test_if_tag_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("if-tag-error02", {"foo": True})
@setup({"if-tag-error03": "{% if foo or %}yes{% else %}no{% endif %}"})
def test_if_tag_error03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("if-tag-error03", {"foo": True})
@setup({"if-tag-error04": "{% if not foo and %}yes{% else %}no{% endif %}"})
def test_if_tag_error04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("if-tag-error04", {"foo": True})
@setup({"if-tag-error05": "{% if not foo or %}yes{% else %}no{% endif %}"})
def test_if_tag_error05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("if-tag-error05", {"foo": True})
@setup({"if-tag-error06": "{% if abc def %}yes{% endif %}"})
def test_if_tag_error06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error06")
@setup({"if-tag-error07": "{% if not %}yes{% endif %}"})
def test_if_tag_error07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error07")
@setup({"if-tag-error08": "{% if and %}yes{% endif %}"})
def test_if_tag_error08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error08")
@setup({"if-tag-error09": "{% if or %}yes{% endif %}"})
def test_if_tag_error09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error09")
@setup({"if-tag-error10": "{% if == %}yes{% endif %}"})
def test_if_tag_error10(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error10")
@setup({"if-tag-error11": "{% if 1 == %}yes{% endif %}"})
def test_if_tag_error11(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error11")
@setup({"if-tag-error12": "{% if a not b %}yes{% endif %}"})
def test_if_tag_error12(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("if-tag-error12")
@setup(
{
"else-if-tag-error01": (
"{% if foo is bar %} yes {% else if foo is not bar %} no {% endif %}"
)
}
)
def test_else_if_tag_error01(self):
error_message = 'Malformed template tag at line 1: "else if foo is not bar"'
with self.assertRaisesMessage(TemplateSyntaxError, error_message):
self.engine.get_template("else-if-tag-error01")
@setup(
{
"if-tag-shortcircuit01": (
"{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}"
)
}
)
def test_if_tag_shortcircuit01(self):
"""
If evaluations are shortcircuited where possible
"""
output = self.engine.render_to_string("if-tag-shortcircuit01", {"x": TestObj()})
self.assertEqual(output, "yes")
@setup(
{
"if-tag-shortcircuit02": (
"{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}"
)
}
)
def test_if_tag_shortcircuit02(self):
"""
The is_bad() function should not be evaluated. If it is, an
exception is raised.
"""
output = self.engine.render_to_string("if-tag-shortcircuit02", {"x": TestObj()})
self.assertEqual(output, "no")
@setup({"if-tag-badarg01": "{% if x|default_if_none:y %}yes{% endif %}"})
def test_if_tag_badarg01(self):
"""Nonexistent args"""
output = self.engine.render_to_string("if-tag-badarg01")
self.assertEqual(output, "")
@setup({"if-tag-badarg02": "{% if x|default_if_none:y %}yes{% endif %}"})
def test_if_tag_badarg02(self):
output = self.engine.render_to_string("if-tag-badarg02", {"y": 0})
self.assertEqual(output, "")
@setup({"if-tag-badarg03": "{% if x|default_if_none:y %}yes{% endif %}"})
def test_if_tag_badarg03(self):
output = self.engine.render_to_string("if-tag-badarg03", {"y": 1})
self.assertEqual(output, "yes")
@setup(
{"if-tag-badarg04": "{% if x|default_if_none:y %}yes{% else %}no{% endif %}"}
)
def test_if_tag_badarg04(self):
output = self.engine.render_to_string("if-tag-badarg04")
self.assertEqual(output, "no")
@setup({"if-tag-single-eq": "{% if foo = bar %}yes{% else %}no{% endif %}"})
def test_if_tag_single_eq(self):
# A single equals sign is a syntax error.
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("if-tag-single-eq", {"foo": 1})
@setup({"template": "{% if foo is True %}yes{% else %}no{% endif %}"})
def test_if_is_match(self):
output = self.engine.render_to_string("template", {"foo": True})
self.assertEqual(output, "yes")
@setup({"template": "{% if foo is True %}yes{% else %}no{% endif %}"})
def test_if_is_no_match(self):
output = self.engine.render_to_string("template", {"foo": 1})
self.assertEqual(output, "no")
@setup({"template": "{% if foo is bar %}yes{% else %}no{% endif %}"})
def test_if_is_variable_missing(self):
output = self.engine.render_to_string("template", {"foo": 1})
self.assertEqual(output, "no")
@setup({"template": "{% if foo is bar %}yes{% else %}no{% endif %}"})
def test_if_is_both_variables_missing(self):
output = self.engine.render_to_string("template", {})
self.assertEqual(output, "yes")
@setup({"template": "{% if foo is not None %}yes{% else %}no{% endif %}"})
def test_if_is_not_match(self):
# For this to act as a regression test, it's important not to use
# foo=True because True is (not None)
output = self.engine.render_to_string("template", {"foo": False})
self.assertEqual(output, "yes")
@setup({"template": "{% if foo is not None %}yes{% else %}no{% endif %}"})
def test_if_is_not_no_match(self):
output = self.engine.render_to_string("template", {"foo": None})
self.assertEqual(output, "no")
@setup({"template": "{% if foo is not bar %}yes{% else %}no{% endif %}"})
def test_if_is_not_variable_missing(self):
output = self.engine.render_to_string("template", {"foo": False})
self.assertEqual(output, "yes")
@setup({"template": "{% if foo is not bar %}yes{% else %}no{% endif %}"})
def test_if_is_not_both_variables_missing(self):
output = self.engine.render_to_string("template", {})
self.assertEqual(output, "no")
| IfTagTests |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 18834,
"end": 20807
} | class ____(SingleContinuousDistribution):
_argnames = ('x0', 'gamma')
@staticmethod
def check(x0, gamma):
_value_check(gamma > 0, "Scale parameter Gamma must be positive.")
_value_check(x0.is_real, "Location parameter must be real.")
def pdf(self, x):
return 1/(pi*self.gamma*(1 + ((x - self.x0)/self.gamma)**2))
def _cdf(self, x):
x0, gamma = self.x0, self.gamma
return (1/pi)*atan((x - x0)/gamma) + S.Half
def _characteristic_function(self, t):
return exp(self.x0 * I * t - self.gamma * Abs(t))
def _moment_generating_function(self, t):
raise NotImplementedError("The moment generating function for the "
"Cauchy distribution does not exist.")
def _quantile(self, p):
return self.x0 + self.gamma*tan(pi*(p - S.Half))
def Cauchy(name, x0, gamma):
r"""
Create a continuous random variable with a Cauchy distribution.
The density of the Cauchy distribution is given by
.. math::
f(x) := \frac{1}{\pi \gamma [1 + {(\frac{x-x_0}{\gamma})}^2]}
Parameters
==========
x0 : Real number, the location
gamma : Real number, `\gamma > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Cauchy, density
>>> from sympy import Symbol
>>> x0 = Symbol("x0")
>>> gamma = Symbol("gamma", positive=True)
>>> z = Symbol("z")
>>> X = Cauchy("x", x0, gamma)
>>> density(X)(z)
1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Cauchy_distribution
.. [2] https://mathworld.wolfram.com/CauchyDistribution.html
"""
return rv(name, CauchyDistribution, (x0, gamma))
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
| CauchyDistribution |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 129121,
"end": 132326
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True, arbitrary_types_allowed=True)
dbt_task: Optional[DbtTask] = Field(
None,
description=(
"If dbt_task, indicates that this must execute a dbt task. It requires both"
" Databricks SQL and the ability to use a serverless or a pro SQL"
" warehouse."
),
)
depends_on: Optional[TaskDependencies] = None
existing_cluster_id: Optional[str] = Field(
None,
description=(
"If existing_cluster_id, the ID of an existing cluster that is used for all"
" runs of this task. When running tasks on an existing cluster, you may"
" need to manually restart the cluster if it stops responding. We suggest"
" running jobs on new clusters for greater reliability."
),
examples=["0923-164208-meows279"],
)
libraries: Optional[List[Library]] = Field(
None,
description=(
"An optional list of libraries to be installed on the cluster that executes"
" the task. The default value is an empty list."
),
)
new_cluster: Optional[NewCluster] = Field(
None,
description=(
"If new_cluster, a description of a cluster that is created for each run."
),
)
notebook_task: Optional[NotebookTask] = Field(
None,
description=(
"If notebook_task, indicates that this task must run a notebook. This field"
" may not be specified in conjunction with spark_jar_task."
),
)
pipeline_task: Optional[PipelineTask] = Field(
None,
description=(
"If pipeline_task, indicates that this task must execute a Pipeline."
),
)
python_wheel_task: Optional[PythonWheelTask] = Field(
None,
description=(
"If python_wheel_task, indicates that this job must execute a PythonWheel."
),
)
spark_jar_task: Optional[SparkJarTask] = Field(
None, description="If spark_jar_task, indicates that this task must run a JAR."
)
spark_python_task: Optional[SparkPythonTask] = Field(
None,
description=(
"If spark_python_task, indicates that this task must run a Python file."
),
)
spark_submit_task: Optional[SparkSubmitTask] = Field(
None,
description=(
"If spark_submit_task, indicates that this task must be launched by the"
" spark submit script."
),
)
sql_task: Optional[SqlTask] = Field(
None,
description=(
"If sql_task, indicates that this job must execute a SQL task. It requires"
" both Databricks SQL and a serverless or a pro SQL warehouse."
),
)
task_key: TaskKey
timeout_seconds: Optional[int] = Field(
None,
description=(
"An optional timeout applied to each run of this job task. The default"
" behavior is to have no timeout."
),
examples=[86400],
)
| RunSubmitTaskSettings |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias7.py | {
"start": 383,
"end": 1341
} | class ____(Generic[TResult, TError]):
def map(
self, mapper: Callable[[Context[TResult]], TResult]
) -> "Result[TResult, TError]":
return Result()
HttpFuncResult = Result[Context[TResult], TError]
HttpFuncResultAsync = Awaitable[Result[Context[TResult], TError]]
HttpFunc = Callable[
[Context[TNext]],
HttpFuncResultAsync[TResult, TError],
]
HttpHandler = Callable[
[
HttpFunc[TNext, TResult, TError],
Context[TSource],
],
HttpFuncResultAsync[TResult, TError],
]
async def run_async(
ctx: Context[TSource],
handler: HttpHandler[str, TResult, TError, TSource],
) -> Result[TResult, TError]:
result = Result[TResult, TError]()
def mapper(x: Context[TResult]) -> TResult:
return x.Response
return result.map(mapper)
T1 = TypeVar("T1", bound=Literal["a", "b", "c"])
T2 = TypeVar("T2", bound=Literal["b", "c"])
TA2: TypeAlias = list[T1]
TA3: TypeAlias = TA2[T2]
| Result |
python | django__django | tests/expressions/tests.py | {
"start": 92141,
"end": 97602
} | class ____(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(
uuid=Value(
uuid.UUID("12345678901234567890123456789012"), output_field=UUIDField()
)
)
self.assertEqual(
UUID.objects.get().uuid, uuid.UUID("12345678901234567890123456789012")
)
def test_deconstruct(self):
value = Value("name")
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(kwargs, {})
def test_deconstruct_output_field(self):
value = Value("name", output_field=CharField())
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(len(kwargs), 1)
self.assertEqual(
kwargs["output_field"].deconstruct(), CharField().deconstruct()
)
def test_repr(self):
tests = [
(None, "Value(None)"),
("str", "Value('str')"),
(True, "Value(True)"),
(42, "Value(42)"),
(
datetime.datetime(2019, 5, 15),
"Value(datetime.datetime(2019, 5, 15, 0, 0))",
),
(Decimal("3.14"), "Value(Decimal('3.14'))"),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(repr(Value(value)), expected)
def test_equal(self):
value = Value("name")
self.assertEqual(value, Value("name"))
self.assertNotEqual(value, Value("username"))
def test_hash(self):
d = {Value("name"): "Bob"}
self.assertIn(Value("name"), d)
self.assertEqual(d[Value("name")], "Bob")
def test_equal_output_field(self):
value = Value("name", output_field=CharField())
same_value = Value("name", output_field=CharField())
other_value = Value("name", output_field=TimeField())
no_output_field = Value("name")
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
self.assertNotEqual(value, no_output_field)
def test_compile_unresolved(self):
# This test might need to be revisited later on if #25425 is enforced.
compiler = Time.objects.all().query.get_compiler(connection=connection)
value = Value("foo")
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
value = Value("foo", output_field=CharField())
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
def test_output_field_decimalfield(self):
Time.objects.create()
time = Time.objects.annotate(one=Value(1, output_field=DecimalField())).first()
self.assertEqual(time.one, 1)
def test_output_field_is_none_error(self):
with self.assertRaises(OutputFieldIsNoneError):
Employee.objects.annotate(custom_expression=Value(None)).first()
def test_output_field_or_none_property_not_cached(self):
expression = Value(None, output_field=None)
self.assertIsNone(expression._output_field_or_none)
expression.output_field = BooleanField()
self.assertIsInstance(expression._output_field_or_none, BooleanField)
def test_resolve_output_field(self):
value_types = [
("str", CharField),
(True, BooleanField),
(42, IntegerField),
(3.14, FloatField),
(datetime.date(2019, 5, 15), DateField),
(datetime.datetime(2019, 5, 15), DateTimeField),
(datetime.time(3, 16), TimeField),
(datetime.timedelta(1), DurationField),
(Decimal("3.14"), DecimalField),
(b"", BinaryField),
(uuid.uuid4(), UUIDField),
]
for value, output_field_type in value_types:
with self.subTest(type=type(value)):
expr = Value(value)
self.assertIsInstance(expr.output_field, output_field_type)
def test_resolve_output_field_failure(self):
msg = "Cannot resolve expression type, unknown output_field"
with self.assertRaisesMessage(FieldError, msg):
Value(object()).output_field
def test_output_field_does_not_create_broken_validators(self):
"""
The output field for a given Value doesn't get cleaned & validated,
however validators may still be instantiated for a given field type
and this demonstrates that they don't throw an exception.
"""
value_types = [
"str",
True,
42,
3.14,
datetime.date(2019, 5, 15),
datetime.datetime(2019, 5, 15),
datetime.time(3, 16),
datetime.timedelta(1),
Decimal("3.14"),
b"",
uuid.uuid4(),
]
for value in value_types:
with self.subTest(type=type(value)):
field = Value(value)._resolve_output_field()
field.clean(value, model_instance=None)
| ValueTests |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 7384,
"end": 7597
} | class ____(_SimpleAutomotiveTestMixin):
"""Test nl_BE automotive provider methods"""
license_plate_pattern: Pattern = re.compile(r"(\d{3}-[A-Z]{3})|" r"([A-Z]{3}-\d{3})|" r"([1-2]-[A-Z]{3}-\d{3})")
| TestNlBe |
python | realpython__materials | python-class/mixins.py | {
"start": 28,
"end": 127
} | class ____:
def __init__(self, name, age):
self.name = name
self.age = age
| Person |
python | huggingface__transformers | src/transformers/models/cohere/modeling_cohere.py | {
"start": 6203,
"end": 10295
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
# Split and rotate. Note that this function is different from e.g. Llama.
x1 = x[..., ::2]
x2 = x[..., 1::2]
rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
return rot_x
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
dtype = q.dtype
q = q.float()
k = k.float()
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
| CohereMLP |
python | huggingface__transformers | src/transformers/models/olmo3/modular_olmo3.py | {
"start": 1467,
"end": 9501
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Olmo3Model`]. It is used to instantiate an OLMo3
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/OLMo-3-0725-1B](https://huggingface.co/allenai/OLMo-3-0725-1B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Olmo3Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
sliding_window (`int`, *optional*, defaults to 4096):
Size of the sliding window for sliding window attention.
layer_types (`list`, *optional*):
Attention pattern for each layer. Defaults to sliding window attention
for 3 out of 4 layers, and full attention for every 4th layer.
```python
>>> from transformers import Olmo3Model, Olmo3Config
>>> # Initializing a Olmo3 7B style configuration
>>> configuration = Olmo3Config()
>>> # Initializing a model from the Olmo3 7B style configuration
>>> model = Olmo3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "olmo3"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.self_attn.k_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.self_attn.v_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.self_attn.o_proj": "rowwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 50304,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = None,
eos_token_id: Optional[int] = 50279,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
rms_norm_eps: Optional[float] = 1e-5,
sliding_window: Optional[int] = 4096,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rms_norm_eps = rms_norm_eps
self.sliding_window = sliding_window
self.layer_types = layer_types
if self.layer_types is None:
self.layer_types = [
"sliding_attention" if (i + 1) % 4 != 0 else "full_attention" for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
| Olmo3Config |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 279221,
"end": 279869
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("DeploymentReviewEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("DeploymentReview"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| DeploymentReviewConnection |
python | sympy__sympy | sympy/core/symbol.py | {
"start": 16132,
"end": 31082
} | class ____(Symbol):
"""
A Wild symbol matches anything, or anything
without whatever is explicitly excluded.
Parameters
==========
name : str
Name of the Wild instance.
exclude : iterable, optional
Instances in ``exclude`` will not be matched.
properties : iterable of functions, optional
Functions, each taking an expressions as input
and returns a ``bool``. All functions in ``properties``
need to return ``True`` in order for the Wild instance
to match the expression.
Examples
========
>>> from sympy import Wild, WildFunction, cos, pi
>>> from sympy.abc import x, y, z
>>> a = Wild('a')
>>> x.match(a)
{a_: x}
>>> pi.match(a)
{a_: pi}
>>> (3*x**2).match(a*x)
{a_: 3*x}
>>> cos(x).match(a)
{a_: cos(x)}
>>> b = Wild('b', exclude=[x])
>>> (3*x**2).match(b*x)
>>> b.match(a)
{a_: b_}
>>> A = WildFunction('A')
>>> A.match(a)
{a_: A_}
Tips
====
When using Wild, be sure to use the exclude
keyword to make the pattern more precise.
Without the exclude pattern, you may get matches
that are technically correct, but not what you
wanted. For example, using the above without
exclude:
>>> from sympy import symbols
>>> a, b = symbols('a b', cls=Wild)
>>> (2 + 3*y).match(a*x + b*y)
{a_: 2/x, b_: 3}
This is technically correct, because
(2/x)*x + 3*y == 2 + 3*y, but you probably
wanted it to not match at all. The issue is that
you really did not want a and b to include x and y,
and the exclude parameter lets you specify exactly
this. With the exclude parameter, the pattern will
not match.
>>> a = Wild('a', exclude=[x, y])
>>> b = Wild('b', exclude=[x, y])
>>> (2 + 3*y).match(a*x + b*y)
Exclude also helps remove ambiguity from matches.
>>> E = 2*x**3*y*z
>>> a, b = symbols('a b', cls=Wild)
>>> E.match(a*b)
{a_: 2*y*z, b_: x**3}
>>> a = Wild('a', exclude=[x, y])
>>> E.match(a*b)
{a_: z, b_: 2*x**3*y}
>>> a = Wild('a', exclude=[x, y, z])
>>> E.match(a*b)
{a_: 2, b_: x**3*y*z}
Wild also accepts a ``properties`` parameter:
>>> a = Wild('a', properties=[lambda k: k.is_Integer])
>>> E.match(a*b)
{a_: 2, b_: x**3*y*z}
"""
is_Wild = True
__slots__ = ('exclude', 'properties')
def __new__(cls, name: str,
exclude: Iterable[Expr | complex] = (),
properties: Iterable[Callable[[Expr], bool | None]] = (),
**assumptions: bool | None,
) -> Self:
exclude = tuple([sympify(x) for x in exclude])
properties = tuple(properties)
cls._sanitize(assumptions, cls)
return Wild.__xnew__(cls, name, exclude, properties, **assumptions)
def __getnewargs__(self):
return (self.name, self.exclude, self.properties)
@staticmethod
@cacheit
def __xnew__(cls, name, exclude, properties, **assumptions):
obj = Symbol.__xnew__(cls, name, **assumptions)
obj.exclude = exclude
obj.properties = properties
return obj
def _hashable_content(self):
return super()._hashable_content() + (self.exclude, self.properties)
# TODO add check against another Wild
def matches(self, expr, repl_dict=None, old=False):
if any(expr.has(x) for x in self.exclude):
return None
if not all(f(expr) for f in self.properties):
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
_range = _re.compile('([0-9]*:[0-9]+|[a-zA-Z]?:[a-zA-Z])')
@overload
def symbols(names: str, *, cls: type[Symbol] = Symbol, seq: Literal[True],
**kwargs: bool | int) -> tuple[Symbol, ...]: ...
@overload
def symbols(names: str, *, cls: Any = Symbol, seq: Literal[True],
**kwargs: bool | int) -> tuple[Any, ...]: ...
@overload
def symbols(names: str, *, cls: Any = Symbol, seq: Literal[False] = False,
**kwargs: bool | int) -> Any: ...
def symbols(names, *, cls: Any = Symbol, **args) -> Any:
r"""
Transform strings into instances of :class:`Symbol` class.
:func:`symbols` function returns a sequence of symbols with names taken
from ``names`` argument, which can be a comma or whitespace delimited
string, or a sequence of strings::
>>> from sympy import symbols, Function
>>> x, y, z = symbols('x,y,z')
>>> a, b, c = symbols('a b c')
The type of output is dependent on the properties of input arguments::
>>> symbols('x')
x
>>> symbols('x,')
(x,)
>>> symbols('x,y')
(x, y)
>>> symbols(('a', 'b', 'c'))
(a, b, c)
>>> symbols(['a', 'b', 'c'])
[a, b, c]
>>> symbols({'a', 'b', 'c'})
{a, b, c}
If an iterable container is needed for a single symbol, set the ``seq``
argument to ``True`` or terminate the symbol name with a comma::
>>> symbols('x', seq=True)
(x,)
To reduce typing, range syntax is supported to create indexed symbols.
Ranges are indicated by a colon and the type of range is determined by
the character to the right of the colon. If the character is a digit
then all contiguous digits to the left are taken as the nonnegative
starting value (or 0 if there is no digit left of the colon) and all
contiguous digits to the right are taken as 1 greater than the ending
value::
>>> symbols('x:10')
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
>>> symbols('x5:10')
(x5, x6, x7, x8, x9)
>>> symbols('x5(:2)')
(x50, x51)
>>> symbols('x5:10,y:5')
(x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
>>> symbols(('x5:10', 'y:5'))
((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
If the character to the right of the colon is a letter, then the single
letter to the left (or 'a' if there is none) is taken as the start
and all characters in the lexicographic range *through* the letter to
the right are used as the range::
>>> symbols('x:z')
(x, y, z)
>>> symbols('x:c') # null range
()
>>> symbols('x(:c)')
(xa, xb, xc)
>>> symbols(':c')
(a, b, c)
>>> symbols('a:d, x:z')
(a, b, c, d, x, y, z)
>>> symbols(('a:d', 'x:z'))
((a, b, c, d), (x, y, z))
Multiple ranges are supported; contiguous numerical ranges should be
separated by parentheses to disambiguate the ending number of one
range from the starting number of the next::
>>> symbols('x:2(1:3)')
(x01, x02, x11, x12)
>>> symbols(':3:2') # parsing is from left to right
(00, 01, 10, 11, 20, 21)
Only one pair of parentheses surrounding ranges are removed, so to
include parentheses around ranges, double them. And to include spaces,
commas, or colons, escape them with a backslash::
>>> symbols('x((a:b))')
(x(a), x(b))
>>> symbols(r'x(:1\,:2)') # or r'x((:1)\,(:2))'
(x(0,0), x(0,1))
All newly created symbols have assumptions set according to ``args``::
>>> a = symbols('a', integer=True)
>>> a.is_integer
True
>>> x, y, z = symbols('x,y,z', real=True)
>>> x.is_real and y.is_real and z.is_real
True
Despite its name, :func:`symbols` can create symbol-like objects like
instances of Function or Wild classes. To achieve this, set ``cls``
keyword argument to the desired type::
>>> symbols('f,g,h', cls=Function)
(f, g, h)
>>> type(_[0])
<class 'sympy.core.function.UndefinedFunction'>
"""
result = []
if isinstance(names, str):
marker = 0
splitters = r'\,', r'\:', r'\ '
literals: list[tuple[str, str]] = []
for splitter in splitters:
if splitter in names:
while chr(marker) in names:
marker += 1
lit_char = chr(marker)
marker += 1
names = names.replace(splitter, lit_char)
literals.append((lit_char, splitter[1:]))
def literal(s):
if literals:
for c, l in literals:
s = s.replace(c, l)
return s
names = names.strip()
as_seq = names.endswith(',')
if as_seq:
names = names[:-1].rstrip()
if not names:
raise ValueError('no symbols given')
# split on commas
names = [n.strip() for n in names.split(',')]
if not all(n for n in names):
raise ValueError('missing symbol between commas')
# split on spaces
for i in range(len(names) - 1, -1, -1):
names[i: i + 1] = names[i].split()
seq = args.pop('seq', as_seq)
for name in names:
if not name:
raise ValueError('missing symbol')
if ':' not in name:
symbol = cls(literal(name), **args)
result.append(symbol)
continue
split: list[str] = _range.split(name)
split_list: list[list[str]] = []
# remove 1 layer of bounding parentheses around ranges
for i in range(len(split) - 1):
if i and ':' in split[i] and split[i] != ':' and \
split[i - 1].endswith('(') and \
split[i + 1].startswith(')'):
split[i - 1] = split[i - 1][:-1]
split[i + 1] = split[i + 1][1:]
for s in split:
if ':' in s:
if s.endswith(':'):
raise ValueError('missing end range')
a, b = s.split(':')
if b[-1] in string.digits:
a_i = 0 if not a else int(a)
b_i = int(b)
split_list.append([str(c) for c in range(a_i, b_i)])
else:
a = a or 'a'
split_list.append([string.ascii_letters[c] for c in range(
string.ascii_letters.index(a),
string.ascii_letters.index(b) + 1)]) # inclusive
if not split_list[-1]:
break
else:
split_list.append([s])
else:
seq = True
if len(split_list) == 1:
names = split_list[0]
else:
names = [''.join(s) for s in product(*split_list)]
if literals:
result.extend([cls(literal(s), **args) for s in names])
else:
result.extend([cls(s, **args) for s in names])
if not seq and len(result) <= 1:
if not result:
return ()
return result[0]
return tuple(result)
else:
for name in names:
result.append(symbols(name, cls=cls, **args))
return type(names)(result)
def var(names, **args):
"""
Create symbols and inject them into the global namespace.
Explanation
===========
This calls :func:`symbols` with the same arguments and puts the results
into the *global* namespace. It's recommended not to use :func:`var` in
library code, where :func:`symbols` has to be used::
Examples
========
>>> from sympy import var
>>> var('x')
x
>>> x # noqa: F821
x
>>> var('a,ab,abc')
(a, ab, abc)
>>> abc # noqa: F821
abc
>>> var('x,y', real=True)
(x, y)
>>> x.is_real and y.is_real # noqa: F821
True
See :func:`symbols` documentation for more details on what kinds of
arguments can be passed to :func:`var`.
"""
def traverse(symbols, frame):
"""Recursively inject symbols to the global namespace. """
for symbol in symbols:
if isinstance(symbol, Basic):
frame.f_globals[symbol.name] = symbol
elif isinstance(symbol, FunctionClass):
frame.f_globals[symbol.__name__] = symbol
else:
traverse(symbol, frame)
from inspect import currentframe
frame = currentframe().f_back
try:
syms = symbols(names, **args)
if syms is not None:
if isinstance(syms, Basic):
frame.f_globals[syms.name] = syms
elif isinstance(syms, FunctionClass):
frame.f_globals[syms.__name__] = syms
else:
traverse(syms, frame)
finally:
del frame # break cyclic dependencies as stated in inspect docs
return syms
def disambiguate(*iter):
"""
Return a Tuple containing the passed expressions with symbols
that appear the same when printed replaced with numerically
subscripted symbols, and all Dummy symbols replaced with Symbols.
Parameters
==========
iter: list of symbols or expressions.
Examples
========
>>> from sympy.core.symbol import disambiguate
>>> from sympy import Dummy, Symbol, Tuple
>>> from sympy.abc import y
>>> tup = Symbol('_x'), Dummy('x'), Dummy('x')
>>> disambiguate(*tup)
(x_2, x, x_1)
>>> eqs = Tuple(Symbol('x')/y, Dummy('x')/y)
>>> disambiguate(*eqs)
(x_1/y, x/y)
>>> ix = Symbol('x', integer=True)
>>> vx = Symbol('x')
>>> disambiguate(vx + ix)
(x + x_1,)
To make your own mapping of symbols to use, pass only the free symbols
of the expressions and create a dictionary:
>>> free = eqs.free_symbols
>>> mapping = dict(zip(free, disambiguate(*free)))
>>> eqs.xreplace(mapping)
(x_1/y, x/y)
"""
new_iter = Tuple(*iter)
key = lambda x:tuple(sorted(x.assumptions0.items()))
syms = ordered(new_iter.free_symbols, keys=key)
mapping = {}
for s in syms:
mapping.setdefault(str(s).lstrip('_'), []).append(s)
reps = {}
for k in mapping:
# the first or only symbol doesn't get subscripted but make
# sure that it's a Symbol, not a Dummy
mapk0 = Symbol("%s" % (k), **mapping[k][0].assumptions0)
if mapping[k][0] != mapk0:
reps[mapping[k][0]] = mapk0
# the others get subscripts (and are made into Symbols)
skip = 0
for i in range(1, len(mapping[k])):
while True:
name = "%s_%i" % (k, i + skip)
if name not in mapping:
break
skip += 1
ki = mapping[k][i]
reps[ki] = Symbol(name, **ki.assumptions0)
return new_iter.xreplace(reps)
| Wild |
python | chroma-core__chroma | chromadb/db/system.py | {
"start": 538,
"end": 6207
} | class ____(Component):
"""Data interface for Chroma's System database"""
@abstractmethod
def create_database(
self, id: UUID, name: str, tenant: str = DEFAULT_TENANT
) -> None:
"""Create a new database in the System database. Raises an Error if the Database
already exists."""
pass
@abstractmethod
def get_database(self, name: str, tenant: str = DEFAULT_TENANT) -> Database:
"""Get a database by name and tenant. Raises an Error if the Database does not
exist."""
pass
@abstractmethod
def delete_database(self, name: str, tenant: str = DEFAULT_TENANT) -> None:
"""Delete a database."""
pass
@abstractmethod
def list_databases(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
tenant: str = DEFAULT_TENANT,
) -> Sequence[Database]:
"""List all databases for a tenant."""
pass
@abstractmethod
def create_tenant(self, name: str) -> None:
"""Create a new tenant in the System database. The name must be unique.
Raises an Error if the Tenant already exists."""
pass
@abstractmethod
def get_tenant(self, name: str) -> Tenant:
"""Get a tenant by name. Raises an Error if the Tenant does not exist."""
pass
# TODO: Investigate and remove this method, as segment creation is done as
# part of collection creation.
@abstractmethod
def create_segment(self, segment: Segment) -> None:
"""Create a new segment in the System database. Raises an Error if the ID
already exists."""
pass
@abstractmethod
def delete_segment(self, collection: UUID, id: UUID) -> None:
"""Delete a segment from the System database."""
pass
@abstractmethod
def get_segments(
self,
collection: UUID,
id: Optional[UUID] = None,
type: Optional[str] = None,
scope: Optional[SegmentScope] = None,
) -> Sequence[Segment]:
"""Find segments by id, type, scope or collection."""
pass
@abstractmethod
def update_segment(
self,
collection: UUID,
id: UUID,
metadata: OptionalArgument[Optional[UpdateMetadata]] = Unspecified(),
) -> None:
"""Update a segment. Unspecified fields will be left unchanged. For the
metadata, keys with None values will be removed and keys not present in the
UpdateMetadata dict will be left unchanged."""
pass
@abstractmethod
def create_collection(
self,
id: UUID,
name: str,
schema: Optional[Schema],
configuration: CreateCollectionConfiguration,
segments: Sequence[Segment],
metadata: Optional[Metadata] = None,
dimension: Optional[int] = None,
get_or_create: bool = False,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> Tuple[Collection, bool]:
"""Create a new collection and associated resources
in the SysDB. If get_or_create is True, the
collection will be created if one with the same name does not exist.
The metadata will be updated using the same protocol as update_collection. If get_or_create
is False and the collection already exists, an error will be raised.
Returns a tuple of the created collection and a boolean indicating whether the
collection was created or not.
"""
pass
@abstractmethod
def delete_collection(
self,
id: UUID,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
"""Delete a collection, all associated segments and any associate resources (log stream)
from the SysDB and the system at large."""
pass
@abstractmethod
def get_collections(
self,
id: Optional[UUID] = None,
name: Optional[str] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Sequence[Collection]:
"""Find collections by id or name. If name is provided, tenant and database must also be provided."""
pass
@abstractmethod
def count_collections(
self,
tenant: str = DEFAULT_TENANT,
database: Optional[str] = None,
) -> int:
"""Gets the number of collections for the (tenant, database) combination."""
pass
@abstractmethod
def get_collection_with_segments(
self, collection_id: UUID
) -> CollectionAndSegments:
"""Get a consistent snapshot of a collection by id. This will return a collection with segment
information that matches the collection version and log position.
"""
pass
@abstractmethod
def update_collection(
self,
id: UUID,
name: OptionalArgument[str] = Unspecified(),
dimension: OptionalArgument[Optional[int]] = Unspecified(),
metadata: OptionalArgument[Optional[UpdateMetadata]] = Unspecified(),
configuration: OptionalArgument[
Optional[UpdateCollectionConfiguration]
] = Unspecified(),
) -> None:
"""Update a collection. Unspecified fields will be left unchanged. For metadata,
keys with None values will be removed and keys not present in the UpdateMetadata
dict will be left unchanged."""
pass
@abstractmethod
def get_collection_size(self, id: UUID) -> int:
"""Returns the number of records in a collection."""
pass
| SysDB |
python | jina-ai__jina | tests/unit/serve/runtimes/worker/test_worker_request_handler.py | {
"start": 945,
"end": 3931
} | class ____(Executor):
@requests
def foo(self, docs, **kwargs):
docs.clear()
@pytest.fixture()
def logger():
return JinaLogger('data request handler')
@pytest.mark.asyncio
async def test_worker_request_handler_new_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'NewDocsExecutor'])
handler = WorkerRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 1
assert response.docs[0].text == 'new document'
@pytest.mark.asyncio
async def test_aync_worker_request_handler_new_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'AsyncNewDocsExecutor'])
handler = WorkerRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 1
assert response.docs[0].text == 'new document'
@pytest.mark.asyncio
async def test_worker_request_handler_change_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'ChangeDocsExecutor'])
handler = WorkerRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 10
for doc in response.docs:
assert doc.text == 'changed document'
@pytest.mark.asyncio
async def test_worker_request_handler_change_docs_from_partial_requests(logger):
NUM_PARTIAL_REQUESTS = 5
args = set_pod_parser().parse_args(['--uses', 'MergeChangeDocsExecutor'])
handler = WorkerRequestHandler(args, logger)
partial_reqs = [
list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
] * NUM_PARTIAL_REQUESTS
assert len(partial_reqs) == 5
assert len(partial_reqs[0].docs) == 10
response = await handler.handle(requests=partial_reqs)
assert len(response.docs) == 10 * NUM_PARTIAL_REQUESTS
for doc in response.docs:
assert doc.text == 'changed document'
@pytest.mark.asyncio
async def test_worker_request_handler_clear_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'ClearDocsExecutor'])
handler = WorkerRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 0
| ClearDocsExecutor |
python | plotly__plotly.py | plotly/graph_objs/choroplethmap/legendgrouptitle/_font.py | {
"start": 233,
"end": 9957
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmap.legendgrouptitle"
_path_str = "choroplethmap.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.choroplethmap.
legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmap.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmap.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B021.py | {
"start": 487,
"end": 520
} | class ____:
"hello world!"
| bar1 |
python | PyCQA__mccabe | mccabe.py | {
"start": 1583,
"end": 2500
} | class ____(object):
def __init__(self, name, entity, lineno, column=0):
self.name = name
self.entity = entity
self.lineno = lineno
self.column = column
self.nodes = defaultdict(list)
def connect(self, n1, n2):
self.nodes[n1].append(n2)
# Ensure that the destination node is always counted.
self.nodes[n2] = []
def to_dot(self):
print('subgraph {')
for node in self.nodes:
node.to_dot()
for node, nexts in self.nodes.items():
for next in nexts:
print('%s -- %s;' % (node.dot_id(), next.dot_id()))
print('}')
def complexity(self):
""" Return the McCabe complexity for the graph.
E-V+2
"""
num_edges = sum([len(n) for n in self.nodes.values()])
num_nodes = len(self.nodes)
return num_edges - num_nodes + 2
| PathGraph |
python | spack__spack | lib/spack/spack/build_environment.py | {
"start": 9387,
"end": 31759
} | class ____:
def __init__(self, pkg: str, exe: str, exe_pkg: str) -> None:
self.pkg = pkg
self.exe = exe
self.exe_pkg = exe_pkg
def __call__(self, *args, **kwargs):
raise UndeclaredDependencyError(
f"{self.pkg} is using {self.exe} without declaring a dependency on {self.exe_pkg}"
)
def add_default_env(self, key: str, value: str):
self.__call__()
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset("LD_LIBRARY_PATH")
env.unset("LD_RUN_PATH")
env.unset("DYLD_LIBRARY_PATH")
env.unset("DYLD_FALLBACK_LIBRARY_PATH")
# These vars affect how the compiler finds libraries and include dirs.
env.unset("LIBRARY_PATH")
env.unset("CPATH")
env.unset("C_INCLUDE_PATH")
env.unset("CPLUS_INCLUDE_PATH")
env.unset("OBJC_INCLUDE_PATH")
# prevent configure scripts from sourcing variables from config site file (AC_SITE_LOAD).
env.set("CONFIG_SITE", os.devnull)
env.unset("CMAKE_PREFIX_PATH")
env.unset("PYTHONPATH")
env.unset("R_HOME")
env.unset("R_ENVIRON")
env.unset("LUA_PATH")
env.unset("LUA_CPATH")
# Affects GNU make, can e.g. indirectly inhibit enabling parallel build
# env.unset('MAKEFLAGS')
# Avoid that libraries of build dependencies get hijacked.
env.unset("LD_PRELOAD")
env.unset("DYLD_INSERT_LIBRARIES")
# Avoid <packagename>_ROOT user variables overriding spack dependencies
# https://cmake.org/cmake/help/latest/variable/PackageName_ROOT.html
# Spack needs SPACK_ROOT though, so we need to exclude that
for varname in os.environ.keys():
if varname.endswith("_ROOT") and varname != "SPACK_ROOT":
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
"CC",
"CFLAGS",
"CPP",
"CPPFLAGS", # C variables
"CXX",
"CCC",
"CXXFLAGS",
"CXXCPP", # C++ variables
"F77",
"FFLAGS",
"FLIBS", # Fortran77 variables
"FC",
"FCFLAGS",
"FCLIBS", # Fortran variables
"LDFLAGS",
"LIBS", # linker variables
]
for v in build_system_vars:
env.unset(v)
# Unset mpi environment vars. These flags should only be set by
# mpi providers for packages with mpi dependencies
mpi_vars = ["MPICC", "MPICXX", "MPIFC", "MPIF77", "MPIF90"]
for v in mpi_vars:
env.unset(v)
build_lang = spack.config.get("config:build_language")
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set("LC_ALL", build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path("PATH")
for p in path:
if "/macports/" in p:
env.remove_path("PATH", p)
return env
def _add_werror_handling(keep_werror, env):
keep_flags = set()
# set of pairs
replace_flags: List[Tuple[str, str]] = []
if keep_werror == "all":
keep_flags.add("-Werror*")
else:
if keep_werror == "specific":
keep_flags.add("-Werror-*")
keep_flags.add("-Werror=*")
# This extra case is to handle -Werror-implicit-function-declaration
replace_flags.append(("-Werror-", "-Wno-error="))
replace_flags.append(("-Werror", "-Wno-error"))
env.set("SPACK_COMPILER_FLAGS_KEEP", "|".join(keep_flags))
env.set("SPACK_COMPILER_FLAGS_REPLACE", " ".join(["|".join(item) for item in replace_flags]))
def set_wrapper_environment_variables_for_flags(pkg, env):
assert pkg.spec.concrete
spec = pkg.spec
if pkg.keep_werror is not None:
keep_werror = pkg.keep_werror
else:
keep_werror = spack.config.get("config:flags:keep_werror")
_add_werror_handling(keep_werror, env)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
# This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
handler = pkg.flag_handler.__func__
injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag][:])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = "SPACK_{0}".format(flag.upper())
env.set(var_name, " ".join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), " ".join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set("SPACK_SYSTEM_DIRS", SYSTEM_DIR_CASE_ENTRY)
return env
def optimization_flags(compiler, target):
# Try to check if the current compiler comes with a version number or
# has an unexpected suffix. If so, treat it as a compiler with a
# custom spec.
version_number, _ = spack.vendor.archspec.cpu.version_components(
compiler.version.dotted_numeric_string
)
try:
result = target.optimization_flags(compiler.name, version_number)
except (ValueError, spack.vendor.archspec.cpu.UnsupportedMicroarchitecture):
result = ""
return result
def set_wrapper_variables(pkg, env):
"""Set environment variables used by the Spack compiler wrapper (which have the prefix
``SPACK_``) and also add the compiler wrappers to PATH.
This determines the injected -L/-I/-rpath options; each of these specifies a search order and
this function computes these options in a manner that is intended to match the DAG traversal
order in ``SetupContext``. TODO: this is not the case yet, we're using post order,
``SetupContext`` is using topo order."""
# Set compiler flags injected from the spec
set_wrapper_environment_variables_for_flags(pkg, env)
# Working directory for the spack command itself, for debug logs.
if spack.config.get("config:debug"):
env.set(SPACK_DEBUG, "TRUE")
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format("{name}-{hash:7}"))
env.set(SPACK_DEBUG_LOG_DIR, spack.paths.spack_working_dir)
if spack.config.get("config:ccache"):
# Enable ccache in the compiler wrapper
env.set(SPACK_CCACHE_BINARY, spack.util.executable.which_string("ccache", required=True))
else:
# Avoid cache pollution if a build system forces `ccache <compiler wrapper invocation>`.
env.set("CCACHE_DISABLE", "1")
# Gather information about various types of dependencies
rpath_hashes = set(s.dag_hash() for s in get_rpath_deps(pkg))
link_deps = pkg.spec.traverse(root=False, order="topo", deptype=dt.LINK)
external_link_deps, nonexternal_link_deps = stable_partition(link_deps, lambda d: d.external)
link_dirs = []
include_dirs = []
rpath_dirs = []
for dep in chain(external_link_deps, nonexternal_link_deps):
# TODO: is_system_path is wrong, but even if we knew default -L, -I flags from the compiler
# and default search dirs from the dynamic linker, it's not obvious how to avoid a possibly
# expensive search in `query.libs.directories` and `query.headers.directories`, which is
# what this branch is trying to avoid.
if is_system_path(dep.prefix):
continue
# TODO: as of Spack 0.22, multiple instances of the same package may occur among the link
# deps, so keying by name is wrong. In practice it is not problematic: we obtain the same
# gcc-runtime / glibc here, and repeatedly add the same dirs that are later deduped.
query = pkg.spec[dep.name]
dep_link_dirs = []
try:
# Locating libraries can be time consuming, so log start and finish.
tty.debug(f"Collecting libraries for {dep.name}")
dep_link_dirs.extend(query.libs.directories)
tty.debug(f"Libraries for {dep.name} have been collected.")
except NoLibrariesError:
tty.debug(f"No libraries found for {dep.name}")
for default_lib_dir in ("lib", "lib64"):
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs[:0] = dep_link_dirs
if dep.dag_hash() in rpath_hashes:
rpath_dirs[:0] = dep_link_dirs
try:
tty.debug(f"Collecting headers for {dep.name}")
include_dirs[:0] = query.headers.directories
tty.debug(f"Headers for {dep.name} have been collected.")
except NoHeadersError:
tty.debug(f"No headers found for {dep.name}")
# The top-level package is heuristically rpath'ed.
for libdir in ("lib64", "lib"):
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.insert(0, lib_path)
# TODO: filter_system_paths is again wrong (and probably unnecessary due to the is_system_path
# branch above). link_dirs should be filtered with entries from _parse_link_paths.
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
default_dynamic_linker_filter = spack.compilers.libraries.dynamic_linker_filter_for(pkg.spec)
if default_dynamic_linker_filter:
rpath_dirs = default_dynamic_linker_filter(rpath_dirs)
# Spack managed directories include the stage, store and upstream stores. We extend this with
# their real paths to make it more robust (e.g. /tmp vs /private/tmp on macOS).
spack_managed_dirs: Set[str] = {
spack.stage.get_stage_root(),
spack.store.STORE.db.root,
*(db.root for db in spack.store.STORE.db.upstream_dbs),
}
spack_managed_dirs.update([os.path.realpath(p) for p in spack_managed_dirs])
env.set(SPACK_MANAGED_DIRS, "|".join(f'"{p}/"*' for p in sorted(spack_managed_dirs)))
is_spack_managed = lambda p: any(p.startswith(store) for store in spack_managed_dirs)
link_dirs_spack, link_dirs_system = stable_partition(link_dirs, is_spack_managed)
include_dirs_spack, include_dirs_system = stable_partition(include_dirs, is_spack_managed)
rpath_dirs_spack, rpath_dirs_system = stable_partition(rpath_dirs, is_spack_managed)
env.set(SPACK_LINK_DIRS, ":".join(link_dirs_system))
env.set(SPACK_INCLUDE_DIRS, ":".join(include_dirs_system))
env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs_system))
env.set(SPACK_STORE_LINK_DIRS, ":".join(link_dirs_spack))
env.set(SPACK_STORE_INCLUDE_DIRS, ":".join(include_dirs_spack))
env.set(SPACK_STORE_RPATH_DIRS, ":".join(rpath_dirs_spack))
def set_package_py_globals(pkg, context: Context = Context.BUILD):
"""Populate the Python module of a package with some useful global names.
This makes things easier for package writers.
"""
module = ModuleChangePropagator(pkg)
jobs = spack.config.determine_number_of_jobs(parallel=pkg.parallel)
module.make_jobs = jobs
module.make = DeprecatedExecutable(pkg.name, "make", "gmake")
module.gmake = DeprecatedExecutable(pkg.name, "gmake", "gmake")
module.ninja = DeprecatedExecutable(pkg.name, "ninja", "ninja")
if sys.platform == "win32":
module.nmake = DeprecatedExecutable(pkg.name, "nmake", "msvc")
module.msbuild = DeprecatedExecutable(pkg.name, "msbuild", "msvc")
# analog to configure for win32
module.cscript = Executable("cscript")
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
module.configure = Executable("./configure")
# Useful directories within the prefix are encapsulated in
# a Prefix object.
module.prefix = pkg.prefix
# Platform-specific library suffix.
module.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get("compiler", module.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(
pkg.spec.architecture, compiler, static_lib, shared_lib, **kwargs
)
module.static_to_shared_library = static_to_shared_library
module.propagate_changes_to_mro()
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None, **kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get("compiler_output", None)
arguments = kwargs.get("arguments", [])
version = kwargs.get("version", None)
compat_version = kwargs.get("compat_version", version)
if not shared_lib:
shared_lib = "{0}.{1}".format(os.path.splitext(static_lib)[0], dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if "linux" in arch or "cray" in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += ".{0}".format(compat_version)
compiler_args = [
"-shared",
"-Wl,-soname,{0}".format(soname),
"-Wl,--whole-archive",
static_lib,
"-Wl,--no-whole-archive",
]
elif "darwin" in arch:
install_name = shared_lib
if compat_version:
install_name += ".{0}".format(compat_version)
compiler_args = [
"-dynamiclib",
"-install_name",
"{0}".format(install_name),
"-Wl,-force_load,{0}".format(static_lib),
]
if compat_version:
compiler_args.extend(["-compatibility_version", "{0}".format(compat_version)])
if version:
compiler_args.extend(["-current_version", "{0}".format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += ".{0}".format(version)
elif compat_version:
shared_lib += ".{0}".format(compat_version)
compiler_args.extend(["-o", shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
symlink(shared_lib_link, "{0}.{1}".format(shared_lib_base, compat_version))
return compiler(*compiler_args, output=compiler_output)
def _get_rpath_deps_from_spec(
    spec: spack.spec.Spec, transitive_rpaths: bool
) -> List[spack.spec.Spec]:
    """Collect the link dependencies of ``spec`` that need to be rpath'ed.

    When ``transitive_rpaths`` is false, only the direct link dependencies are
    returned. Otherwise the whole link subtree is walked and, for packages that
    appear more than once, only the newest version is kept.
    """
    if not transitive_rpaths:
        return spec.dependencies(deptype=dt.LINK)
    newest_by_name: Dict[str, spack.spec.Spec] = {}
    for candidate in spec.traverse(root=False, deptype=dt.LINK):
        current = newest_by_name.get(candidate.name)
        # Keep the first-seen position in the dict, but prefer the newer version.
        if current is None or current.version < candidate.version:
            newest_by_name[candidate.name] = candidate
    return list(newest_by_name.values())
def get_rpath_deps(pkg: spack.package_base.PackageBase) -> List[spack.spec.Spec]:
    """Return the dependencies of ``pkg`` that must be added to its rpath.

    Depending on ``pkg.transitive_rpaths`` these are either the immediate or the
    transitive link dependencies; duplicated packages are resolved in favor of
    the newest version.
    """
    return _get_rpath_deps_from_spec(pkg.spec, pkg.transitive_rpaths)
def get_cmake_prefix_path(pkg: spack.package_base.PackageBase) -> List[str]:
    """Compute the ``CMAKE_PREFIX_PATH`` entries for ``pkg``.

    Entries come from the :attr:`~spack.package_base.PackageBase.cmake_prefix_paths`
    attribute of direct build/test dependencies and of transitive link
    dependencies, visited in topological order. System paths are filtered out,
    and external prefixes are listed after Spack-built ones so they cannot
    shadow them.
    """
    visitor = traverse.MixedDepthVisitor(
        direct=dt.BUILD | dt.TEST, transitive=dt.LINK, key=traverse.by_dag_hash
    )
    dep_edges = traverse.traverse_topo_edges_generator(
        traverse.with_artificial_edges([pkg.spec]),
        visitor=visitor,
        key=traverse.by_dag_hash,
        root=False,
        all_edges=False,  # cover all nodes, not all edges
    )
    ordered_specs = [edge.spec for edge in dep_edges]
    # Externals go last so that they do not shadow Spack-managed prefixes.
    external_specs, built_specs = stable_partition(
        (spec for spec in ordered_specs), lambda spec: spec.external
    )
    prefix_paths = (
        path
        for spec in chain(built_specs, external_specs)
        for path in spec.package.cmake_prefix_paths
    )
    return filter_system_paths(prefix_paths)
def setup_package(pkg, dirty, context: Context = Context.BUILD):
    """Execute all environment setup routines.

    Builds up the environment modifications required to build (or test) ``pkg``
    and applies them to the current process environment.

    Args:
        pkg: the package being set up.
        dirty (bool): if True, start from the caller's current environment;
            otherwise start from a cleaned environment.
        context (Context): ``Context.BUILD`` or ``Context.TEST``.

    Returns:
        EnvironmentModifications: all modifications that were applied under
        Spack's control (external module loads excluded).

    Raises:
        ValueError: if ``context`` is neither ``Context.BUILD`` nor
            ``Context.TEST``.
    """
    if context not in (Context.BUILD, Context.TEST):
        raise ValueError(f"'context' must be Context.BUILD or Context.TEST - got {context}")
    # First populate the package.py's module with the relevant globals that could be used in any
    # of the setup_* functions.
    setup_context = SetupContext(pkg.spec, context=context)
    setup_context.set_all_package_py_globals()
    # Keep track of env changes from packages separately, since we want to
    # issue warnings when packages make "suspicious" modifications.
    env_base = EnvironmentModifications() if dirty else clean_environment()
    env_mods = EnvironmentModifications()
    # setup compilers for build contexts (and for test contexts that need one)
    need_compiler = context == Context.BUILD or (
        context == Context.TEST and pkg.test_requires_compiler
    )
    if need_compiler:
        set_wrapper_variables(pkg, env_mods)
    # Platform specific setup goes before package specific setup. This is for setting
    # defaults like MACOSX_DEPLOYMENT_TARGET on macOS.
    platform = spack.platforms.by_name(pkg.spec.architecture.platform)
    platform.setup_platform_environment(pkg, env_mods)
    tty.debug("setup_package: grabbing modifications from dependencies")
    env_mods.extend(setup_context.get_env_modifications())
    tty.debug("setup_package: collected all modifications from dependencies")
    tty.debug("setup_package: adding compiler wrappers paths")
    # Group modifications per variable name so individual variables can be
    # inspected below (wrapper path, dtags flags).
    env_by_name = env_mods.group_by_name()
    for x in env_by_name["SPACK_COMPILER_WRAPPER_PATH"]:
        assert isinstance(
            x, PrependPath
        ), "unexpected setting used for SPACK_COMPILER_WRAPPER_PATH"
        env_mods.prepend_path("PATH", x.value)
    # Check whether we want to force RPATH or RUNPATH
    enable_var_name, disable_var_name = "SPACK_ENABLE_NEW_DTAGS", "SPACK_DISABLE_NEW_DTAGS"
    if enable_var_name in env_by_name and disable_var_name in env_by_name:
        enable_new_dtags = _extract_dtags_arg(env_by_name, var_name=enable_var_name)
        disable_new_dtags = _extract_dtags_arg(env_by_name, var_name=disable_var_name)
        # shared_linking:type == "rpath" means the enable-new-dtags flag must be
        # stripped and the disable flag added; otherwise the roles are swapped.
        if spack.config.CONFIG.get("config:shared_linking:type") == "rpath":
            env_mods.set("SPACK_DTAGS_TO_STRIP", enable_new_dtags)
            env_mods.set("SPACK_DTAGS_TO_ADD", disable_new_dtags)
        else:
            env_mods.set("SPACK_DTAGS_TO_STRIP", disable_new_dtags)
            env_mods.set("SPACK_DTAGS_TO_ADD", enable_new_dtags)
    if context == Context.TEST:
        env_mods.prepend_path("PATH", ".")
    elif context == Context.BUILD and not dirty and not env_mods.is_unset("CPATH"):
        tty.debug(
            "A dependency has updated CPATH, this may lead pkg-config to assume that the package "
            "is part of the system includes and omit it when invoked with '--cflags'."
        )
    # First apply the clean environment changes
    env_base.apply_modifications()
    # Load modules on an already clean environment, just before applying Spack's
    # own environment modifications. This ensures Spack controls CC/CXX/... variables.
    load_external_modules(setup_context)
    # Make sure nothing's strange about the Spack environment.
    validate(env_mods, tty.warn)
    env_mods.apply_modifications()
    # Return all env modifications we controlled (excluding module related ones)
    env_base.extend(env_mods)
    return env_base
def _extract_dtags_arg(env_by_name: Dict[str, ModificationList], *, var_name: str) -> str:
    """Fetch the value of the first modification recorded for ``var_name``.

    Returns the empty string when the variable is absent, has no recorded
    modifications, or its first modification carries no ``value`` attribute.
    """
    try:
        return env_by_name[var_name][0].value  # type: ignore[union-attr]
    except (KeyError, IndexError, AttributeError):
        return ""
| DeprecatedExecutable |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 21622,
"end": 21920
} | class ____(_DgConfigErrorRecord):
key: str
expected_type_str: str
value_str: str
@property
def message(self) -> str:
return f"Invalid value for `{self.key}`:\n Expected: {self.expected_type_str}\n Received: {self.value_str}"
@record
| _DgConfigInvalidValueErrorRecord |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/connectors/aioodbc.py | {
"start": 4331,
"end": 4905
} | class ____(PyODBCConnector):
is_async = True
supports_statement_cache = True
supports_server_side_cursors = True
@classmethod
def import_dbapi(cls):
return AsyncAdapt_aioodbc_dbapi(
__import__("aioodbc"), __import__("pyodbc")
)
def create_connect_args(self, url: URL) -> ConnectArgsType:
arg, kw = super().create_connect_args(url)
if arg and arg[0]:
kw["dsn"] = arg[0]
return (), kw
def get_driver_connection(self, connection):
return connection._connection
| aiodbcConnector |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py | {
"start": 61891,
"end": 64280
} | class ____(nn.Module):
"""Image-audio embedding."""
def __init__(self, config: Phi4MultimodalConfig) -> None:
super().__init__()
self.config = config
self.image_token_id = config.vision_config.image_token_id
self.audio_token_id = config.audio_config.audio_token_id
self.image_embed = Phi4MultimodalImageEmbedding(config)
self.audio_embed = Phi4MultimodalAudioEmbedding(config)
def forward(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.Tensor,
image_pixel_values: Optional[torch.FloatTensor] = None,
audio_input_features: Optional[torch.FloatTensor] = None,
image_sizes=None,
image_attention_mask=None,
audio_embed_sizes=None,
audio_attention_mask=None,
) -> torch.FloatTensor:
with torch.no_grad():
image_position_mask = (input_ids == self.config.vision_config.image_token_id).unsqueeze(-1)
non_image_position_mask = ~image_position_mask
image_embeds = None
audio_embeds = None
if image_pixel_values is not None and (input_ids == self.image_token_id).any():
image_embeds = self.image_embed(
input_ids,
inputs_embeds,
image_pixel_values=image_pixel_values,
image_sizes=image_sizes,
image_attention_mask=image_attention_mask,
)
if audio_input_features is not None and (input_ids == self.audio_token_id).any():
audio_projection_mode = "vision" if image_pixel_values is not None else "speech"
audio_embeds = self.audio_embed(
input_ids,
inputs_embeds,
audio_input_features=audio_input_features,
audio_embed_sizes=audio_embed_sizes,
audio_attention_mask=audio_attention_mask,
audio_projection_mode=audio_projection_mode,
)
# merge image and audio
if image_embeds is not None and audio_embeds is not None:
inputs_embeds = image_embeds * image_position_mask + audio_embeds * non_image_position_mask
elif image_embeds is not None:
inputs_embeds = image_embeds
elif audio_embeds is not None:
inputs_embeds = audio_embeds
return inputs_embeds
| Phi4MultimodalFeatureEmbedding |
python | huggingface__transformers | src/transformers/models/pixtral/image_processing_pixtral.py | {
"start": 5379,
"end": 22290
} | class ____(BaseImageProcessor):
r"""
Constructs a Pixtral image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 1024}`):
Size of the maximum dimension of either the height or width dimension of the image. Used to control how
images are resized. If either the height or width are greater than `size["longest_edge"]` then both the height and width are rescaled by `height / ratio`, `width /ratio` where `ratio = max(height / longest_edge, width / longest_edge)`
patch_size (`dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`):
Size of the patches in the model, used to calculate the output image size. Can be overridden by `patch_size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values", "image_sizes"]
valid_kwargs = PixtralImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
patch_size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"longest_edge": 1024}
patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
patch_size = get_size_dict(patch_size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.patch_size = patch_size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
self.do_convert_rgb = do_convert_rgb
self._valid_processor_keys = [
"images",
"do_resize",
"size",
"patch_size",
"resample",
"do_rescale",
"rescale_factor",
"do_normalize",
"image_mean",
"image_std",
"do_convert_rgb",
"return_tensors",
"data_format",
"input_data_format",
]
def resize(
self,
image: np.ndarray,
size: dict[str, int],
patch_size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dict containing the longest possible edge of the image.
patch_size (`dict[str, int]`):
Patch size used to calculate the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if "longest_edge" in size:
size = (size["longest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
if "height" in patch_size and "width" in patch_size:
patch_size = (patch_size["height"], patch_size["width"])
else:
raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
patch_size=patch_size,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def _pad_for_batching(
self,
pixel_values: list[np.ndarray],
image_sizes: list[list[int]],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.
Args:
pixel_values (`list[np.ndarray]`):
An array of pixel values of each images of shape (`batch_size`, `height`, `width`, `channels`)
image_sizes (`list[list[int]]`):
A list of sizes for each image in `pixel_values` in (height, width) format.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images.
"""
max_shape = (
max(size[0] for size in image_sizes),
max(size[1] for size in image_sizes),
)
pixel_values = [
pad(
image,
padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])),
data_format=data_format,
input_data_format=input_data_format,
)
for image, size in zip(pixel_values, image_sizes)
]
return pixel_values
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
patch_size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Describes the maximum input dimensions to the model.
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Patch size in the model. Used to calculate the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
patch_size = patch_size if patch_size is not None else self.patch_size
patch_size = get_size_dict(patch_size, default_to_square=True)
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images[0]):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
batch_images = []
batch_image_sizes = []
for image in images:
if do_resize:
image = self.resize(
image=image,
size=size,
patch_size=patch_size,
resample=resample,
input_data_format=input_data_format,
)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
batch_images.append(image)
batch_image_sizes.append(get_image_size(image, data_format))
pixel_values = self._pad_for_batching(
pixel_values=batch_images,
image_sizes=batch_image_sizes,
input_data_format=data_format,
data_format=data_format,
)
return BatchFeature(
data={"pixel_values": pixel_values, "image_sizes": batch_image_sizes}, tensor_type=return_tensors
)
__all__ = ["PixtralImageProcessor"]
| PixtralImageProcessor |
python | kamyu104__LeetCode-Solutions | Python/maximum-equal-frequency.py | {
"start": 50,
"end": 661
} | class ____(object):
def maxEqualFreq(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
count = collections.Counter()
freq = [0 for _ in xrange(len(nums)+1)]
for i, n in enumerate(nums, 1):
freq[count[n]] -= 1
freq[count[n]+1] += 1
count[n] += 1
c = count[n]
if freq[c]*c == i and i < len(nums):
result = i+1
remain = i-freq[c]*c
if freq[remain] == 1 and remain in [1, c+1]:
result = i
return result
| Solution |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 6164,
"end": 6357
} | class ____(CreateValues):
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
| TestCreateValuesPlainMultiple |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 537122,
"end": 537792
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestReviewCommentEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestReviewComment"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| PullRequestReviewCommentConnection |
python | Textualize__textual | tests/input/test_select_on_focus.py | {
"start": 237,
"end": 1141
} | class ____(App[None]):
"""An app with an input widget."""
def compose(self) -> ComposeResult:
yield Input("Hello, world!")
async def test_focus_from_app_focus_does_not_select():
"""When an Input has focused and the *app* is blurred and then focused (e.g. by pressing
alt+tab or focusing another terminal pane), then the content of the Input should not be
fully selected when `Input.select_on_focus=True`.
"""
async with InputApp().run_test() as pilot:
input_widget = pilot.app.query_one(Input)
input_widget.focus()
input_widget.selection = Selection.cursor(0)
assert input_widget.selection == Selection.cursor(0)
pilot.app.post_message(events.AppBlur())
await pilot.pause()
pilot.app.post_message(events.AppFocus())
await pilot.pause()
assert input_widget.selection == Selection.cursor(0)
| InputApp |
python | getsentry__sentry | src/sentry/sentry_apps/external_issues/issue_link_creator.py | {
"start": 668,
"end": 2145
} | class ____:
install: RpcSentryAppInstallation
group: Group
action: str
fields: dict[str, Any]
uri: str
user: RpcUser
def run(self) -> PlatformExternalIssue:
with transaction.atomic(using=router.db_for_write(PlatformExternalIssue)):
self._verify_action()
response = self._make_external_request()
external_issue = self._create_external_issue(response=response)
return external_issue
def _verify_action(self) -> None:
try:
self.action = IssueRequestActionType(self.action)
except ValueError as e:
raise SentryAppSentryError(
message=f"Invalid action: {self.action}", status_code=500
) from e
def _make_external_request(self) -> dict[str, Any]:
response = IssueLinkRequester(
install=self.install,
uri=self.uri,
group=self.group,
fields=self.fields,
user=self.user,
action=IssueRequestActionType(self.action),
).run()
return response
def _create_external_issue(self, response: dict[str, Any]) -> PlatformExternalIssue:
external_issue = ExternalIssueCreator(
install=self.install,
group=self.group,
web_url=response["webUrl"],
project=response["project"],
identifier=response["identifier"],
).run()
return external_issue
| IssueLinkCreator |
python | great-expectations__great_expectations | tests/integration/fixtures/partition_and_sample_data/partitioner_test_cases_and_fixtures.py | {
"start": 9546,
"end": 11166
} | class ____(TaxiPartitioningTestCasesBase):
@override
def test_cases(self) -> List[TaxiPartitioningTestCase]:
return [
TaxiPartitioningTestCase(
table_domain_test_case=False,
num_expected_batch_definitions=3,
num_expected_rows_in_first_batch_definition=120,
expected_column_values=self.taxi_test_data.year_batch_identifier_data(),
add_batch_definition_method_name="add_batch_definition_yearly",
add_batch_definition_kwargs={"column": self.taxi_test_data.test_column_name},
),
TaxiPartitioningTestCase(
table_domain_test_case=False,
num_expected_batch_definitions=36,
num_expected_rows_in_first_batch_definition=10,
expected_column_values=self.taxi_test_data.year_month_batch_identifier_data(),
add_batch_definition_method_name="add_batch_definition_monthly",
add_batch_definition_kwargs={"column": self.taxi_test_data.test_column_name},
),
TaxiPartitioningTestCase(
table_domain_test_case=False,
num_expected_batch_definitions=299,
num_expected_rows_in_first_batch_definition=2,
expected_column_values=self.taxi_test_data.year_month_day_batch_identifier_data(),
add_batch_definition_method_name="add_batch_definition_daily",
add_batch_definition_kwargs={"column": self.taxi_test_data.test_column_name},
),
]
| TaxiPartitioningTestCasesDateTime |
python | tensorflow__tensorflow | tensorflow/python/framework/flexible_dtypes_test.py | {
"start": 3005,
"end": 44290
} | class ____(tf_test.TestCase, parameterized.TestCase):
# Test all possible TF dtypes in ALL mode.
@parameterized.parameters(
(dtypes.bool, dtypes.bool, (dtypes.bool, False)),
(dtypes.bool, dtypes.uint8, (dtypes.uint8, False)),
(dtypes.bool, dtypes.uint16, (dtypes.uint16, False)),
(dtypes.bool, dtypes.uint32, (dtypes.uint32, False)),
(dtypes.bool, dtypes.uint64, (dtypes.uint64, False)),
(dtypes.bool, dtypes.int8, (dtypes.int8, False)),
(dtypes.bool, dtypes.int16, (dtypes.int16, False)),
(dtypes.bool, dtypes.int32, (dtypes.int32, False)),
(dtypes.bool, dtypes.int64, (dtypes.int64, False)),
(dtypes.bool, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.bool, dtypes.float16, (dtypes.float16, False)),
(dtypes.bool, dtypes.float32, (dtypes.float32, False)),
(dtypes.bool, dtypes.float64, (dtypes.float64, False)),
(dtypes.bool, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.bool, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.uint8, dtypes.uint8, (dtypes.uint8, False)),
(dtypes.uint8, dtypes.uint16, (dtypes.uint16, False)),
(dtypes.uint8, dtypes.uint32, (dtypes.uint32, False)),
(dtypes.uint8, dtypes.uint64, (dtypes.uint64, False)),
(dtypes.uint8, dtypes.int8, (dtypes.int16, False)),
(dtypes.uint8, dtypes.int16, (dtypes.int16, False)),
(dtypes.uint8, dtypes.int32, (dtypes.int32, False)),
(dtypes.uint8, dtypes.int64, (dtypes.int64, False)),
(dtypes.uint8, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.uint8, dtypes.float16, (dtypes.float16, False)),
(dtypes.uint8, dtypes.float32, (dtypes.float32, False)),
(dtypes.uint8, dtypes.float64, (dtypes.float64, False)),
(dtypes.uint8, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.uint8, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.uint16, dtypes.uint16, (dtypes.uint16, False)),
(dtypes.uint16, dtypes.uint32, (dtypes.uint32, False)),
(dtypes.uint16, dtypes.uint64, (dtypes.uint64, False)),
(dtypes.uint16, dtypes.int8, (dtypes.int32, False)),
(dtypes.uint16, dtypes.int16, (dtypes.int32, False)),
(dtypes.uint16, dtypes.int32, (dtypes.int32, False)),
(dtypes.uint16, dtypes.int64, (dtypes.int64, False)),
(dtypes.uint16, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.uint16, dtypes.float16, (dtypes.float16, False)),
(dtypes.uint16, dtypes.float32, (dtypes.float32, False)),
(dtypes.uint16, dtypes.float64, (dtypes.float64, False)),
(dtypes.uint16, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.uint16, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.uint32, dtypes.uint32, (dtypes.uint32, False)),
(dtypes.uint32, dtypes.uint64, (dtypes.uint64, False)),
(dtypes.uint32, dtypes.int8, (dtypes.int64, False)),
(dtypes.uint32, dtypes.int16, (dtypes.int64, False)),
(dtypes.uint32, dtypes.int32, (dtypes.int64, False)),
(dtypes.uint32, dtypes.int64, (dtypes.int64, False)),
(dtypes.uint32, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.uint32, dtypes.float16, (dtypes.float16, False)),
(dtypes.uint32, dtypes.float32, (dtypes.float32, False)),
(dtypes.uint32, dtypes.float64, (dtypes.float64, False)),
(dtypes.uint32, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.uint32, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.uint64, dtypes.uint64, (dtypes.uint64, False)),
(dtypes.uint64, dtypes.uint64, (dtypes.uint64, False)),
(dtypes.uint64, dtypes.int16, (dtypes.float64, True)),
(dtypes.uint64, dtypes.int32, (dtypes.float64, True)),
(dtypes.uint64, dtypes.int64, (dtypes.float64, True)),
(dtypes.uint64, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.uint64, dtypes.float16, (dtypes.float16, False)),
(dtypes.uint64, dtypes.float32, (dtypes.float32, False)),
(dtypes.uint64, dtypes.float64, (dtypes.float64, False)),
(dtypes.uint64, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.uint64, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.int8, dtypes.int8, (dtypes.int8, False)),
(dtypes.int8, dtypes.int16, (dtypes.int16, False)),
(dtypes.int8, dtypes.int32, (dtypes.int32, False)),
(dtypes.int8, dtypes.int64, (dtypes.int64, False)),
(dtypes.int8, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.int8, dtypes.float16, (dtypes.float16, False)),
(dtypes.int8, dtypes.float32, (dtypes.float32, False)),
(dtypes.int8, dtypes.float64, (dtypes.float64, False)),
(dtypes.int8, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.int8, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.int16, dtypes.int16, (dtypes.int16, False)),
(dtypes.int16, dtypes.int32, (dtypes.int32, False)),
(dtypes.int16, dtypes.int64, (dtypes.int64, False)),
(dtypes.int16, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.int16, dtypes.float16, (dtypes.float16, False)),
(dtypes.int16, dtypes.float32, (dtypes.float32, False)),
(dtypes.int16, dtypes.float64, (dtypes.float64, False)),
(dtypes.int16, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.int16, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.int32, dtypes.int32, (dtypes.int32, False)),
(dtypes.int32, dtypes.int64, (dtypes.int64, False)),
(dtypes.int32, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.int32, dtypes.float16, (dtypes.float16, False)),
(dtypes.int32, dtypes.float32, (dtypes.float32, False)),
(dtypes.int32, dtypes.float64, (dtypes.float64, False)),
(dtypes.int32, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.int32, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.int64, dtypes.int64, (dtypes.int64, False)),
(dtypes.int64, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.int64, dtypes.float16, (dtypes.float16, False)),
(dtypes.int64, dtypes.float32, (dtypes.float32, False)),
(dtypes.int64, dtypes.float64, (dtypes.float64, False)),
(dtypes.int64, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.int64, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.bfloat16, dtypes.bfloat16, (dtypes.bfloat16, False)),
(dtypes.bfloat16, dtypes.float16, (dtypes.float32, False)),
(dtypes.bfloat16, dtypes.float32, (dtypes.float32, False)),
(dtypes.bfloat16, dtypes.float64, (dtypes.float64, False)),
(dtypes.bfloat16, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.bfloat16, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.float16, dtypes.float16, (dtypes.float16, False)),
(dtypes.float16, dtypes.float32, (dtypes.float32, False)),
(dtypes.float16, dtypes.float64, (dtypes.float64, False)),
(dtypes.float16, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.float16, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.float32, dtypes.float32, (dtypes.float32, False)),
(dtypes.float32, dtypes.float64, (dtypes.float64, False)),
(dtypes.float32, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.float32, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.float64, dtypes.float64, (dtypes.float64, False)),
(dtypes.float64, dtypes.complex64, (dtypes.complex128, False)),
(dtypes.float64, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.complex64, dtypes.complex64, (dtypes.complex64, False)),
(dtypes.complex64, dtypes.complex128, (dtypes.complex128, False)),
(dtypes.complex128, dtypes.complex128, (dtypes.complex128, False)),
)
def testResultTypeTFAndTF(self, a_dtype, b_dtype, res_dtype):
with DtypeConversionTestEnv('all'):
input_a = (
constant_op.constant(1, dtype=a_dtype)
if a_dtype != dtypes.bool
else constant_op.constant(True)
)
input_b = (
constant_op.constant(2, dtype=b_dtype)
if b_dtype != dtypes.bool
else constant_op.constant(False)
)
self.assertEqual(
flexible_dtypes.result_type(input_a, input_b),
res_dtype,
)
# Test NP types dtype inference.
@parameterized.parameters(
    (np.bool_, np.uint8, dtypes.uint8),
    (np.uint8, np.int8, dtypes.int16),
    (np.uint16, np.int8, dtypes.int32),
    (np.uint32, np.float16, dtypes.float16),
    (np.uint64, np.float16, dtypes.float16),
    (np.uint64, np.complex64, dtypes.complex64),
    (np.int8, np.float16, dtypes.float16),
    (np.int16, np.float16, dtypes.float16),
    (np.int32, np.complex64, dtypes.complex64),
    (np.int64, np.float16, dtypes.float16),
    (np.float16, np.complex64, dtypes.complex64),
    (np.float32, np.float64, dtypes.float64),
    (np.float64, np.complex128, dtypes.complex128),
    (np.complex64, np.complex64, dtypes.complex64),
    (np.complex64, np.complex128, dtypes.complex128),
)
def testResultTypeNPAndNP(self, a_dtype, b_dtype, res_dtype):
  """Two np arrays with explicit dtypes promote to a strong (non-weak) result.

  Each row is (np dtype a, np dtype b, expected tf dtype); the expected
  weakness flag is always False because both inputs carry explicit dtypes.
  """
  with DtypeConversionTestEnv('all'):
    self.assertEqual(
        flexible_dtypes.result_type(
            np.array(1, dtype=a_dtype), np.array(2, dtype=b_dtype)
        ),
        (res_dtype, False),
    )
# Test np.array with default types.
# np.array(int) => i64
# np.array(float) => f64
# np.array(complex) => complex128
@parameterized.parameters(
    (1, np.bool_, (dtypes.int64, False)),
    (1, np.uint8, (dtypes.int64, False)),
    (1, np.uint16, (dtypes.int64, False)),
    (1, np.uint32, (dtypes.int64, False)),
    (1, np.uint64, (dtypes.float64, True)),
    (1, np.int8, (dtypes.int64, False)),
    (1, np.int16, (dtypes.int64, False)),
    (1, np.int32, (dtypes.int64, False)),
    (1, np.int64, (dtypes.int64, False)),
    (1, np.float16, (dtypes.float16, False)),
    (1, np.float32, (dtypes.float32, False)),
    (1, np.float64, (dtypes.float64, False)),
    (1, np.complex64, (dtypes.complex64, False)),
    (1, np.complex128, (dtypes.complex128, False)),
    (1.0, np.bool_, (dtypes.float64, False)),
    (1.0, np.uint8, (dtypes.float64, False)),
    (1.0, np.uint16, (dtypes.float64, False)),
    (1.0, np.uint32, (dtypes.float64, False)),
    (1.0, np.uint64, (dtypes.float64, False)),
    (1.0, np.int8, (dtypes.float64, False)),
    (1.0, np.int16, (dtypes.float64, False)),
    (1.0, np.int32, (dtypes.float64, False)),
    (1.0, np.int64, (dtypes.float64, False)),
    (1.0, np.float16, (dtypes.float64, False)),
    (1.0, np.float32, (dtypes.float64, False)),
    (1.0, np.float64, (dtypes.float64, False)),
    (1.0, np.complex64, (dtypes.complex128, False)),
    (1.0, np.complex128, (dtypes.complex128, False)),
    (1.0j, np.bool_, (dtypes.complex128, False)),
    (1.0j, np.uint8, (dtypes.complex128, False)),
    (1.0j, np.uint16, (dtypes.complex128, False)),
    (1.0j, np.uint32, (dtypes.complex128, False)),
    (1.0j, np.uint64, (dtypes.complex128, False)),
    (1.0j, np.int8, (dtypes.complex128, False)),
    (1.0j, np.int16, (dtypes.complex128, False)),
    (1.0j, np.int32, (dtypes.complex128, False)),
    (1.0j, np.int64, (dtypes.complex128, False)),
    (1.0j, np.float16, (dtypes.complex128, False)),
    (1.0j, np.float32, (dtypes.complex128, False)),
    (1.0j, np.float64, (dtypes.complex128, False)),
    (1.0j, np.complex64, (dtypes.complex128, False)),
    (1.0j, np.complex128, (dtypes.complex128, False)),
)
def testResultTypeNPDefaultArray(self, array_in, dtype, res_dtype):
  """np.array built from a bare Python scalar uses NumPy's default dtype.

  Rows are (python scalar for np.array, np dtype of the second operand,
  expected (tf dtype, weak) pair).
  """
  with DtypeConversionTestEnv('all'):
    self.assertEqual(
        flexible_dtypes.result_type(
            np.array(array_in), np.array(1, dtype=dtype)
        ),
        res_dtype,
    )
# Test Python int inputs. Note that Python int literals are converted into
# weak int32 type.
@parameterized.parameters(
    (dtypes.bool, (dtypes.int32, True)),
    (dtypes.uint8, (dtypes.uint8, False)),
    (dtypes.uint16, (dtypes.uint16, False)),
    (dtypes.uint32, (dtypes.uint32, False)),
    (dtypes.uint64, (dtypes.uint64, False)),
    (dtypes.int8, (dtypes.int8, False)),
    (dtypes.int16, (dtypes.int16, False)),
    (dtypes.int32, (dtypes.int32, False)),
    (dtypes.int64, (dtypes.int64, False)),
    (dtypes.bfloat16, (dtypes.bfloat16, False)),
    (dtypes.float16, (dtypes.float16, False)),
    (dtypes.float32, (dtypes.float32, False)),
    (dtypes.float64, (dtypes.float64, False)),
    (dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.complex128, (dtypes.complex128, False)),
)
def testResultTypePythonInt(self, input_dtype, res_dtype):
  """Weak-int32 Python literal `1` defers to any strong tensor dtype."""
  with DtypeConversionTestEnv('all'):
    # bool has no integer constructor path, so build it from a bool literal.
    t_input = (
        constant_op.constant(2, dtype=input_dtype)
        if input_dtype != dtypes.bool
        else constant_op.constant(True)
    )
    self.assertEqual(flexible_dtypes.result_type(1, t_input), res_dtype)
# Test Python float inputs. Note that Python float literals are converted into
# weak float32 type.
@parameterized.parameters(
    (dtypes.bool, (dtypes.float32, True)),
    (dtypes.uint8, (dtypes.float64, True)),
    (dtypes.uint16, (dtypes.float64, True)),
    (dtypes.uint32, (dtypes.float64, True)),
    (dtypes.uint64, (dtypes.float64, True)),
    (dtypes.int8, (dtypes.float64, True)),
    (dtypes.int16, (dtypes.float64, True)),
    (dtypes.int32, (dtypes.float64, True)),
    (dtypes.int64, (dtypes.float64, True)),
    (dtypes.bfloat16, (dtypes.bfloat16, False)),
    (dtypes.float16, (dtypes.float16, False)),
    (dtypes.float32, (dtypes.float32, False)),
    (dtypes.float64, (dtypes.float64, False)),
    (dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.complex128, (dtypes.complex128, False)),
)
def testResultTypePythonFloat(self, input_dtype, res_dtype):
  """Weak-float32 Python literal `1.0` promotes integer tensors to weak f64."""
  with DtypeConversionTestEnv('all'):
    # bool has no integer constructor path, so build it from a bool literal.
    t_input = (
        constant_op.constant(2, dtype=input_dtype)
        if input_dtype != dtypes.bool
        else constant_op.constant(True)
    )
    self.assertEqual(flexible_dtypes.result_type(1.0, t_input), res_dtype)
# Test Python complex inputs. Note that Python complex literals are converted
# into weak complex128 type.
@parameterized.parameters(
    (dtypes.bool, (dtypes.complex128, True)),
    (dtypes.uint8, (dtypes.complex128, True)),
    (dtypes.uint16, (dtypes.complex128, True)),
    (dtypes.uint32, (dtypes.complex128, True)),
    (dtypes.uint64, (dtypes.complex128, True)),
    (dtypes.int8, (dtypes.complex128, True)),
    (dtypes.int16, (dtypes.complex128, True)),
    (dtypes.int32, (dtypes.complex128, True)),
    (dtypes.int64, (dtypes.complex128, True)),
    (dtypes.bfloat16, (dtypes.complex64, False)),
    (dtypes.float16, (dtypes.complex64, False)),
    (dtypes.float32, (dtypes.complex64, False)),
    (dtypes.float64, (dtypes.complex128, False)),
    (dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.complex128, (dtypes.complex128, False)),
)
def testResultTypePythonComplex(self, input_dtype, res_dtype):
  """Weak-complex128 literal `1.0j` promotes every tensor dtype to complex."""
  with DtypeConversionTestEnv('all'):
    # bool has no integer constructor path, so build it from a bool literal.
    t_input = (
        constant_op.constant(2, dtype=input_dtype)
        if input_dtype != dtypes.bool
        else constant_op.constant(True)
    )
    self.assertEqual(flexible_dtypes.result_type(1.0j, t_input), res_dtype)
# Test every possible weak type + TF dtype.
@parameterized.parameters(
    (dtypes.int32, dtypes.bool, (dtypes.int32, True)),
    (dtypes.int32, dtypes.uint8, (dtypes.uint8, False)),
    (dtypes.int32, dtypes.uint16, (dtypes.uint16, False)),
    (dtypes.int32, dtypes.uint32, (dtypes.uint32, False)),
    (dtypes.int32, dtypes.uint64, (dtypes.uint64, False)),
    (dtypes.int32, dtypes.int8, (dtypes.int8, False)),
    (dtypes.int32, dtypes.int16, (dtypes.int16, False)),
    (dtypes.int32, dtypes.int32, (dtypes.int32, False)),
    (dtypes.int32, dtypes.int64, (dtypes.int64, False)),
    (dtypes.int32, dtypes.bfloat16, (dtypes.bfloat16, False)),
    (dtypes.int32, dtypes.float16, (dtypes.float16, False)),
    (dtypes.int32, dtypes.float32, (dtypes.float32, False)),
    (dtypes.int32, dtypes.float64, (dtypes.float64, False)),
    (dtypes.int32, dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.int32, dtypes.complex128, (dtypes.complex128, False)),
    (dtypes.int64, dtypes.bool, (dtypes.int64, True)),
    (dtypes.int64, dtypes.uint8, (dtypes.uint8, False)),
    (dtypes.int64, dtypes.uint16, (dtypes.uint16, False)),
    (dtypes.int64, dtypes.uint32, (dtypes.uint32, False)),
    (dtypes.int64, dtypes.uint64, (dtypes.uint64, False)),
    (dtypes.int64, dtypes.int8, (dtypes.int8, False)),
    (dtypes.int64, dtypes.int16, (dtypes.int16, False)),
    (dtypes.int64, dtypes.int32, (dtypes.int32, False)),
    (dtypes.int64, dtypes.int64, (dtypes.int64, False)),
    # Fixed: this row previously duplicated (int32, bfloat16) from the int32
    # section above, leaving the weak-int64 x bfloat16 pairing untested.
    (dtypes.int64, dtypes.bfloat16, (dtypes.bfloat16, False)),
    (dtypes.int64, dtypes.float16, (dtypes.float16, False)),
    (dtypes.int64, dtypes.float32, (dtypes.float32, False)),
    (dtypes.int64, dtypes.float64, (dtypes.float64, False)),
    (dtypes.int64, dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.int64, dtypes.complex128, (dtypes.complex128, False)),
    (dtypes.float32, dtypes.bool, (dtypes.float32, True)),
    (dtypes.float32, dtypes.uint8, (dtypes.float64, True)),
    (dtypes.float32, dtypes.uint16, (dtypes.float64, True)),
    (dtypes.float32, dtypes.uint32, (dtypes.float64, True)),
    (dtypes.float32, dtypes.uint64, (dtypes.float64, True)),
    (dtypes.float32, dtypes.int8, (dtypes.float64, True)),
    (dtypes.float32, dtypes.int16, (dtypes.float64, True)),
    (dtypes.float32, dtypes.int32, (dtypes.float64, True)),
    (dtypes.float32, dtypes.int64, (dtypes.float64, True)),
    (dtypes.float32, dtypes.bfloat16, (dtypes.bfloat16, False)),
    (dtypes.float32, dtypes.float16, (dtypes.float16, False)),
    (dtypes.float32, dtypes.float32, (dtypes.float32, False)),
    (dtypes.float32, dtypes.float64, (dtypes.float64, False)),
    (dtypes.float32, dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.float32, dtypes.complex128, (dtypes.complex128, False)),
    (dtypes.float64, dtypes.bool, (dtypes.float64, True)),
    (dtypes.float64, dtypes.uint8, (dtypes.float64, True)),
    (dtypes.float64, dtypes.uint16, (dtypes.float64, True)),
    (dtypes.float64, dtypes.uint32, (dtypes.float64, True)),
    (dtypes.float64, dtypes.uint64, (dtypes.float64, True)),
    (dtypes.float64, dtypes.int8, (dtypes.float64, True)),
    (dtypes.float64, dtypes.int16, (dtypes.float64, True)),
    (dtypes.float64, dtypes.int32, (dtypes.float64, True)),
    (dtypes.float64, dtypes.int64, (dtypes.float64, True)),
    (dtypes.float64, dtypes.bfloat16, (dtypes.bfloat16, False)),
    (dtypes.float64, dtypes.float16, (dtypes.float16, False)),
    (dtypes.float64, dtypes.float32, (dtypes.float32, False)),
    (dtypes.float64, dtypes.float64, (dtypes.float64, False)),
    (dtypes.float64, dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.float64, dtypes.complex128, (dtypes.complex128, False)),
    (dtypes.complex128, dtypes.bool, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.uint8, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.uint16, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.uint32, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.uint64, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.int8, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.int16, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.int32, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.int64, (dtypes.complex128, True)),
    (dtypes.complex128, dtypes.bfloat16, (dtypes.complex64, False)),
    (dtypes.complex128, dtypes.float16, (dtypes.complex64, False)),
    (dtypes.complex128, dtypes.float32, (dtypes.complex64, False)),
    (dtypes.complex128, dtypes.float64, (dtypes.complex128, False)),
    (dtypes.complex128, dtypes.complex64, (dtypes.complex64, False)),
    (dtypes.complex128, dtypes.complex128, (dtypes.complex128, False)),
)
def testResultTypeWeakTypesWithTF(self, weak_dtype_a, dtype_b, res_dtype):
  """A WeakTensor combined with a strong Tensor follows the weak-type lattice.

  Rows are (weak dtype of input a, strong dtype of input b,
  expected (tf dtype, weak) pair).
  """
  with DtypeConversionTestEnv('all'):
    # bool has no integer constructor path, so build it from a bool literal.
    input_a = (
        constant_op.constant(1, dtype=weak_dtype_a)
        if weak_dtype_a != dtypes.bool
        else constant_op.constant(True)
    )
    input_b = (
        constant_op.constant(2, dtype=dtype_b)
        if dtype_b != dtypes.bool
        else constant_op.constant(True)
    )
    weak_input_a = weak_tensor.WeakTensor(input_a)
    self.assertEqual(
        flexible_dtypes.result_type(weak_input_a, input_b), res_dtype
    )
# Test all the possible weak types + weak types.
@parameterized.parameters(
    (dtypes.int32, dtypes.int32, dtypes.int32),
    (dtypes.int32, dtypes.int64, dtypes.int64),
    (dtypes.int32, dtypes.float32, dtypes.float32),
    (dtypes.int32, dtypes.float64, dtypes.float64),
    (dtypes.int32, dtypes.complex128, dtypes.complex128),
    (dtypes.int64, dtypes.int32, dtypes.int64),
    (dtypes.int64, dtypes.int64, dtypes.int64),
    (dtypes.int64, dtypes.float32, dtypes.float32),
    (dtypes.int64, dtypes.float64, dtypes.float64),
    (dtypes.int64, dtypes.complex128, dtypes.complex128),
    (dtypes.float32, dtypes.int32, dtypes.float32),
    (dtypes.float32, dtypes.int64, dtypes.float32),
    (dtypes.float32, dtypes.float32, dtypes.float32),
    (dtypes.float32, dtypes.float64, dtypes.float64),
    (dtypes.float32, dtypes.complex128, dtypes.complex128),
    (dtypes.float64, dtypes.int32, dtypes.float64),
    (dtypes.float64, dtypes.int64, dtypes.float64),
    (dtypes.float64, dtypes.float32, dtypes.float64),
    (dtypes.float64, dtypes.float64, dtypes.float64),
    (dtypes.float64, dtypes.complex128, dtypes.complex128),
    (dtypes.complex128, dtypes.int32, dtypes.complex128),
    (dtypes.complex128, dtypes.int64, dtypes.complex128),
    (dtypes.complex128, dtypes.float32, dtypes.complex128),
    (dtypes.complex128, dtypes.float64, dtypes.complex128),
    (dtypes.complex128, dtypes.complex128, dtypes.complex128),
)
def testResultTypeWeakTypesWithWeakTypes(self, dtype_a, dtype_b, res_dtype):
  """Two WeakTensors promote to the wider dtype and stay weak."""
  with DtypeConversionTestEnv('all'):
    # bool has no integer constructor path, so build it from a bool literal.
    input_a = (
        constant_op.constant(1, dtype=dtype_a)
        if dtype_a != dtypes.bool
        else constant_op.constant(True)
    )
    input_b = (
        constant_op.constant(2, dtype=dtype_b)
        if dtype_b != dtypes.bool
        else constant_op.constant(True)
    )
    weak_input_a = weak_tensor.WeakTensor(input_a)
    weak_input_b = weak_tensor.WeakTensor(input_b)
    self.assertEqual(
        flexible_dtypes.result_type(weak_input_a, weak_input_b),
        # The result is always weak because both inputs are weak.
        (res_dtype, True),
    )
# Test unallowed promotions in SAFE mode. Make sure exceptions are thrown.
@parameterized.parameters(
    ((dtypes.uint8, False), (dtypes.int8, False)),
    ((dtypes.uint8, False), (dtypes.float32, True)),
    ((dtypes.uint16, False), (dtypes.int8, False)),
    ((dtypes.uint16, False), (dtypes.int16, False)),
    ((dtypes.uint16, False), (dtypes.bfloat16, False)),
    ((dtypes.uint16, False), (dtypes.float16, False)),
    ((dtypes.uint16, False), (dtypes.float32, True)),
    ((dtypes.uint32, False), (dtypes.int8, False)),
    ((dtypes.uint32, False), (dtypes.int16, False)),
    ((dtypes.uint32, False), (dtypes.int32, False)),
    ((dtypes.uint32, False), (dtypes.bfloat16, False)),
    ((dtypes.uint32, False), (dtypes.float32, False)),
    ((dtypes.uint32, False), (dtypes.complex64, False)),
    ((dtypes.uint32, False), (dtypes.float32, True)),
    ((dtypes.uint64, False), (dtypes.int8, False)),
    ((dtypes.uint64, False), (dtypes.int16, False)),
    ((dtypes.uint64, False), (dtypes.int32, False)),
    ((dtypes.uint64, False), (dtypes.int64, False)),
    ((dtypes.uint64, False), (dtypes.bfloat16, False)),
    ((dtypes.uint64, False), (dtypes.float16, False)),
    ((dtypes.uint64, False), (dtypes.float32, False)),
    ((dtypes.uint64, False), (dtypes.float64, False)),
    ((dtypes.uint64, False), (dtypes.complex64, False)),
    ((dtypes.uint64, False), (dtypes.complex128, False)),
    ((dtypes.uint64, False), (dtypes.float32, True)),
    ((dtypes.uint64, False), (dtypes.float64, True)),
    ((dtypes.uint64, False), (dtypes.complex128, True)),
    ((dtypes.int8, False), (dtypes.float32, True)),
    ((dtypes.int16, False), (dtypes.bfloat16, False)),
    ((dtypes.int16, False), (dtypes.float16, False)),
    ((dtypes.int16, False), (dtypes.float32, True)),
    ((dtypes.int32, False), (dtypes.bfloat16, False)),
    ((dtypes.int32, False), (dtypes.float16, False)),
    ((dtypes.int32, False), (dtypes.float32, False)),
    ((dtypes.int32, False), (dtypes.complex64, False)),
    ((dtypes.int32, False), (dtypes.float32, True)),
    ((dtypes.int64, False), (dtypes.bfloat16, False)),
    ((dtypes.int64, False), (dtypes.float16, False)),
    ((dtypes.int64, False), (dtypes.float32, False)),
    ((dtypes.int64, False), (dtypes.float64, False)),
    ((dtypes.int64, False), (dtypes.complex64, False)),
    ((dtypes.int64, False), (dtypes.complex128, False)),
    ((dtypes.int64, False), (dtypes.float32, True)),
    ((dtypes.int64, False), (dtypes.float64, True)),
    ((dtypes.int64, False), (dtypes.complex128, True)),
    ((dtypes.bfloat16, False), (dtypes.float16, False)),
    ((dtypes.bfloat16, False), (dtypes.complex128, True)),
)
def testResultTypeSafeModeUnallowedPromo(self, a_dtype, b_dtype):
  """Lossy promotions raise TypeError under 'safe' mode.

  Each parameter is a ((dtype, weak), (dtype, weak)) pair that 'safe' mode
  must reject because the promotion risks precision loss or overflow.
  """
  with DtypeConversionTestEnv('safe'):
    # Create Tensor of input dtypes.
    input_a = (
        constant_op.constant(1, dtype=a_dtype[0])
        if a_dtype[0] != dtypes.bool
        else constant_op.constant(True)
    )
    input_b = (
        constant_op.constant(2, dtype=b_dtype[0])
        if b_dtype[0] != dtypes.bool
        else constant_op.constant(False)
    )
    # Create WeakTensors if weak = True.
    if a_dtype[1]:
      input_a = weak_tensor.WeakTensor(input_a)
    if b_dtype[1]:
      input_b = weak_tensor.WeakTensor(input_b)
    with self.assertRaises(TypeError):
      flexible_dtypes.result_type(input_a, input_b)
# Test allowed promotions in SAFE mode. Make sure no exception is thrown.
@parameterized.parameters(
    ((dtypes.bool, False), (dtypes.bool, False)),
    ((dtypes.bool, False), (dtypes.uint8, False)),
    ((dtypes.bool, False), (dtypes.uint16, False)),
    ((dtypes.bool, False), (dtypes.uint32, False)),
    ((dtypes.bool, False), (dtypes.uint64, False)),
    ((dtypes.bool, False), (dtypes.int8, False)),
    ((dtypes.bool, False), (dtypes.int16, False)),
    ((dtypes.bool, False), (dtypes.int32, False)),
    ((dtypes.bool, False), (dtypes.int64, False)),
    ((dtypes.bool, False), (dtypes.bfloat16, False)),
    ((dtypes.bool, False), (dtypes.float16, False)),
    ((dtypes.bool, False), (dtypes.float32, False)),
    ((dtypes.bool, False), (dtypes.float64, False)),
    ((dtypes.bool, False), (dtypes.complex64, False)),
    ((dtypes.bool, False), (dtypes.complex128, False)),
    ((dtypes.bool, False), (dtypes.int32, True)),
    ((dtypes.bool, False), (dtypes.int64, True)),
    ((dtypes.bool, False), (dtypes.float32, True)),
    ((dtypes.bool, False), (dtypes.float64, True)),
    ((dtypes.bool, False), (dtypes.complex128, True)),
    ((dtypes.uint8, False), (dtypes.uint8, False)),
    ((dtypes.uint8, False), (dtypes.uint16, False)),
    ((dtypes.uint8, False), (dtypes.uint32, False)),
    ((dtypes.uint8, False), (dtypes.uint64, False)),
    ((dtypes.uint8, False), (dtypes.int16, False)),
    ((dtypes.uint8, False), (dtypes.int32, False)),
    ((dtypes.uint8, False), (dtypes.int64, False)),
    ((dtypes.uint8, False), (dtypes.bfloat16, False)),
    ((dtypes.uint8, False), (dtypes.float16, False)),
    ((dtypes.uint8, False), (dtypes.float32, False)),
    ((dtypes.uint8, False), (dtypes.float64, False)),
    ((dtypes.uint8, False), (dtypes.complex64, False)),
    ((dtypes.uint8, False), (dtypes.complex128, False)),
    ((dtypes.uint8, False), (dtypes.int32, True)),
    ((dtypes.uint8, False), (dtypes.int64, True)),
    ((dtypes.uint8, False), (dtypes.float64, True)),
    ((dtypes.uint8, False), (dtypes.complex128, True)),
    ((dtypes.uint16, False), (dtypes.uint16, False)),
    ((dtypes.uint16, False), (dtypes.uint32, False)),
    ((dtypes.uint16, False), (dtypes.uint64, False)),
    ((dtypes.uint16, False), (dtypes.int32, False)),
    ((dtypes.uint16, False), (dtypes.int64, False)),
    ((dtypes.uint16, False), (dtypes.float32, False)),
    ((dtypes.uint16, False), (dtypes.float64, False)),
    ((dtypes.uint16, False), (dtypes.complex64, False)),
    ((dtypes.uint16, False), (dtypes.complex128, False)),
    ((dtypes.uint16, False), (dtypes.int32, True)),
    ((dtypes.uint16, False), (dtypes.int64, True)),
    ((dtypes.uint16, False), (dtypes.float64, True)),
    ((dtypes.uint16, False), (dtypes.complex128, True)),
    ((dtypes.uint32, False), (dtypes.uint32, False)),
    ((dtypes.uint32, False), (dtypes.uint64, False)),
    ((dtypes.uint32, False), (dtypes.int64, False)),
    ((dtypes.uint32, False), (dtypes.float64, False)),
    ((dtypes.uint32, False), (dtypes.complex128, False)),
    ((dtypes.uint32, False), (dtypes.int32, True)),
    ((dtypes.uint32, False), (dtypes.int64, True)),
    ((dtypes.uint32, False), (dtypes.float64, True)),
    ((dtypes.uint32, False), (dtypes.complex128, True)),
    ((dtypes.uint64, False), (dtypes.uint64, False)),
    ((dtypes.uint64, False), (dtypes.int32, True)),
    ((dtypes.uint64, False), (dtypes.int64, True)),
    ((dtypes.int8, False), (dtypes.int8, False)),
    ((dtypes.int8, False), (dtypes.int16, False)),
    ((dtypes.int8, False), (dtypes.int32, False)),
    ((dtypes.int8, False), (dtypes.int64, False)),
    ((dtypes.int8, False), (dtypes.bfloat16, False)),
    ((dtypes.int8, False), (dtypes.float16, False)),
    ((dtypes.int8, False), (dtypes.float32, False)),
    ((dtypes.int8, False), (dtypes.float64, False)),
    ((dtypes.int8, False), (dtypes.complex64, False)),
    ((dtypes.int8, False), (dtypes.complex128, False)),
    ((dtypes.int8, False), (dtypes.int32, True)),
    ((dtypes.int8, False), (dtypes.int64, True)),
    ((dtypes.int8, False), (dtypes.float64, True)),
    ((dtypes.int8, False), (dtypes.complex128, True)),
    ((dtypes.int16, False), (dtypes.int16, False)),
    ((dtypes.int16, False), (dtypes.int32, False)),
    ((dtypes.int16, False), (dtypes.int64, False)),
    ((dtypes.int16, False), (dtypes.float32, False)),
    ((dtypes.int16, False), (dtypes.float64, False)),
    ((dtypes.int16, False), (dtypes.complex64, False)),
    ((dtypes.int16, False), (dtypes.complex128, False)),
    ((dtypes.int16, False), (dtypes.int32, True)),
    ((dtypes.int16, False), (dtypes.int64, True)),
    ((dtypes.int16, False), (dtypes.float64, True)),
    ((dtypes.int16, False), (dtypes.complex128, True)),
    ((dtypes.int32, False), (dtypes.int32, False)),
    ((dtypes.int32, False), (dtypes.int64, False)),
    ((dtypes.int32, False), (dtypes.float64, False)),
    ((dtypes.int32, False), (dtypes.complex128, False)),
    ((dtypes.int32, False), (dtypes.int32, True)),
    ((dtypes.int32, False), (dtypes.int64, True)),
    ((dtypes.int32, False), (dtypes.float64, True)),
    ((dtypes.int32, False), (dtypes.complex128, True)),
    ((dtypes.int64, False), (dtypes.int64, False)),
    ((dtypes.int64, False), (dtypes.int32, True)),
    ((dtypes.int64, False), (dtypes.int64, True)),
    ((dtypes.bfloat16, False), (dtypes.bfloat16, False)),
    ((dtypes.bfloat16, False), (dtypes.float32, False)),
    ((dtypes.bfloat16, False), (dtypes.float64, False)),
    ((dtypes.bfloat16, False), (dtypes.complex64, False)),
    ((dtypes.bfloat16, False), (dtypes.complex128, False)),
    ((dtypes.bfloat16, False), (dtypes.int32, True)),
    ((dtypes.bfloat16, False), (dtypes.int64, True)),
    ((dtypes.bfloat16, False), (dtypes.float32, True)),
    ((dtypes.bfloat16, False), (dtypes.float64, True)),
    ((dtypes.float16, False), (dtypes.float16, False)),
    ((dtypes.float16, False), (dtypes.float32, False)),
    ((dtypes.float16, False), (dtypes.float64, False)),
    ((dtypes.float16, False), (dtypes.complex64, False)),
    ((dtypes.float16, False), (dtypes.complex128, False)),
    ((dtypes.float16, False), (dtypes.int32, True)),
    ((dtypes.float16, False), (dtypes.int64, True)),
    ((dtypes.float16, False), (dtypes.float32, True)),
    ((dtypes.float16, False), (dtypes.float64, True)),
    ((dtypes.float32, False), (dtypes.float32, False)),
    ((dtypes.float32, False), (dtypes.float64, False)),
    ((dtypes.float32, False), (dtypes.complex64, False)),
    ((dtypes.float32, False), (dtypes.complex128, False)),
    ((dtypes.float32, False), (dtypes.int32, True)),
    ((dtypes.float32, False), (dtypes.int64, True)),
    ((dtypes.float32, False), (dtypes.float32, True)),
    ((dtypes.float32, False), (dtypes.float64, True)),
    ((dtypes.float64, False), (dtypes.float64, False)),
    ((dtypes.float64, False), (dtypes.complex128, False)),
    ((dtypes.float64, False), (dtypes.int32, True)),
    ((dtypes.float64, False), (dtypes.int64, True)),
    ((dtypes.float64, False), (dtypes.float32, True)),
    ((dtypes.float64, False), (dtypes.float64, True)),
    ((dtypes.float64, False), (dtypes.complex128, True)),
    ((dtypes.complex64, False), (dtypes.complex64, False)),
    ((dtypes.complex64, False), (dtypes.complex128, False)),
    ((dtypes.complex64, False), (dtypes.int32, True)),
    ((dtypes.complex64, False), (dtypes.int64, True)),
    ((dtypes.complex64, False), (dtypes.float32, True)),
    ((dtypes.complex64, False), (dtypes.float64, True)),
    ((dtypes.complex64, False), (dtypes.complex128, True)),
    ((dtypes.complex128, False), (dtypes.complex128, False)),
    ((dtypes.complex128, False), (dtypes.int32, True)),
    ((dtypes.complex128, False), (dtypes.int64, True)),
    ((dtypes.complex128, False), (dtypes.float32, True)),
    ((dtypes.complex128, False), (dtypes.float64, True)),
    ((dtypes.complex128, False), (dtypes.complex128, True)),
    ((dtypes.int32, True), (dtypes.int32, True)),
    ((dtypes.int32, True), (dtypes.int64, True)),
    ((dtypes.int32, True), (dtypes.float32, True)),
    ((dtypes.int32, True), (dtypes.float64, True)),
    ((dtypes.int32, True), (dtypes.complex128, True)),
    ((dtypes.int64, True), (dtypes.int64, True)),
    ((dtypes.int64, True), (dtypes.float32, True)),
    ((dtypes.int64, True), (dtypes.float64, True)),
    ((dtypes.int64, True), (dtypes.complex128, True)),
    ((dtypes.float32, True), (dtypes.float32, True)),
    ((dtypes.float32, True), (dtypes.float64, True)),
    ((dtypes.float32, True), (dtypes.complex128, True)),
    ((dtypes.float64, True), (dtypes.float64, True)),
    ((dtypes.float64, True), (dtypes.complex128, True)),
    ((dtypes.complex128, True), (dtypes.complex128, True)),
)
def testResultTypeSafeModeAllowedPromo(self, a_dtype, b_dtype):
  """Lossless promotions complete without raising under 'safe' mode.

  Each parameter is a ((dtype, weak), (dtype, weak)) pair; only the absence
  of an exception is asserted, not the resulting dtype.
  """
  with DtypeConversionTestEnv('safe'):
    # Create Tensor of input dtypes.
    input_a = (
        constant_op.constant(1, dtype=a_dtype[0])
        if a_dtype[0] != dtypes.bool
        else constant_op.constant(True)
    )
    input_b = (
        constant_op.constant(2, dtype=b_dtype[0])
        if b_dtype[0] != dtypes.bool
        else constant_op.constant(False)
    )
    # Create WeakTensors if weak = True.
    if a_dtype[1]:
      input_a = weak_tensor.WeakTensor(input_a)
    if b_dtype[1]:
      input_b = weak_tensor.WeakTensor(input_b)
    flexible_dtypes.result_type(input_a, input_b)
# Test Python nested structure type inference.
def testResultTypePythonNestedStructure(self):
  """Nested Python lists promote to the widest weak element type."""
  cases = (
      # i32* + f32* => f32*
      (([1], [1.0]), (dtypes.float32, True)),
      # f32* + c128* => c128*
      (([1, 2.0], [1.0j]), (dtypes.complex128, True)),
      (([[1, 1.0], [1.0, 1.0]], [1.0j]), (dtypes.complex128, True)),
  )
  with DtypeConversionTestEnv('all'):
    for args, expected in cases:
      self.assertEqual(flexible_dtypes.result_type(*args), expected)
# Test tf.variable type inference.
def testResultTypeVariable(self):
  """A float32 Variable combined with a float64 Tensor yields strong f64."""
  with DtypeConversionTestEnv('all'):
    float_var = variables.Variable(1.0, dtype=dtypes.float32)
    double_tensor = constant_op.constant(1, dtype=dtypes.float64)
    res = flexible_dtypes.result_type(float_var, double_tensor)
    self.assertEqual(res, (dtypes.float64, False))
# Test TF Dtypes type inference.
def testResultTypeTFDtype(self):
  """Raw tf.DType inputs promote like strong tensors of those dtypes."""
  with DtypeConversionTestEnv('all'):
    res = flexible_dtypes.result_type(dtypes.float32, dtypes.float16)
    self.assertEqual(res, (dtypes.float32, False))
# Test NP dtype class type inference.
def testResultTypeNPDtype(self):
  """Plain np.dtype objects are supported; structured dtypes are rejected."""
  with DtypeConversionTestEnv('all'):
    self.assertEqual(
        flexible_dtypes.result_type(np.dtype(np.float32)),
        (dtypes.float32, False),
    )
    structured_dtypes = (
        np.dtype([('f1', np.int16)]),
        np.dtype([('a', 'f8'), ('b', 'S10')]),
    )
    for structured in structured_dtypes:
      with self.assertRaises(NotImplementedError):
        _ = flexible_dtypes.result_type(structured)
# Test bool type inference.
def testResultTypeBool(self):
  """Python bool literals infer a strong tf.bool."""
  with DtypeConversionTestEnv('all'):
    res = flexible_dtypes.result_type(True, False)
    self.assertEqual(res, (dtypes.bool, False))
# Test Tensor shape type inference.
def testResultTypeTensorShape(self):
  """TensorShape inputs infer strong int32 regardless of the tensor dtype."""
  with DtypeConversionTestEnv('all'):
    shape = constant_op.constant([1, 2], dtype=dtypes.float64).shape
    self.assertEqual(
        flexible_dtypes.result_type(shape), (dtypes.int32, False)
    )
# Test string types.
def testResultTypeStr(self):
  """Strings promote to tf.string; mixing strings with numbers is rejected."""
  with DtypeConversionTestEnv('all'):
    result_dtype, _ = flexible_dtypes.result_type('foo', 'bar')
    self.assertEqual(result_dtype, dtypes.string)
    with self.assertRaisesRegex(
        NotImplementedError,
        "Implicit Conversion between <dtype: 'string'> and <dtype: 'int32'>"
        ' is not allowed. Please convert the input manually if you need to.',
    ):
      flexible_dtypes.result_type('foo', 1)
# Test byte types.
def testResultTypeBytes(self):
  """Bytes promote to tf.string; mixing bytes with numbers is rejected."""
  with DtypeConversionTestEnv('all'):
    result_dtype, _ = flexible_dtypes.result_type(b'foo', b'bar')
    self.assertEqual(result_dtype, dtypes.string)
    with self.assertRaisesRegex(
        NotImplementedError,
        "Implicit Conversion between <dtype: 'string'> and <dtype: 'int32'>"
        ' is not allowed. Please convert the input manually if you need to.',
    ):
      flexible_dtypes.result_type(b'foo', 1)
# Test empty input.
def testResultTypeEmptyInput(self):
  """With no inputs, the result defaults to weak float32."""
  with DtypeConversionTestEnv('all'):
    res = flexible_dtypes.result_type()
    self.assertEqual(res, (dtypes.float32, True))
def testResultTypeUnsupportedInputType(self):
  """ExtensionType inputs are rejected with NotImplementedError."""

  class MyTensor(extension_type.ExtensionType):
    value: tensor.Tensor

  with DtypeConversionTestEnv('all'):
    a = MyTensor(constant_op.constant(1))
    with self.assertRaisesRegex(
        NotImplementedError,
        f'Auto dtype conversion semantics does not support {type(a)} type.',
    ):
      _ = flexible_dtypes.result_type(a)
# Test v1 + v2 = v2 + v1.
def testCommunicativity(self):
  """result_type is symmetric in its two arguments."""
  with DtypeConversionTestEnv('all'):
    for left in _ALL_INPUT_TYPES:
      for right in _ALL_INPUT_TYPES:
        forward = flexible_dtypes.result_type(left, right)
        backward = flexible_dtypes.result_type(right, left)
        self.assertEqual(forward, backward)
# Test (v1 + v2) + v3 = v1 + (v2 + v3).
def testAssociativity(self):
  """result_type over three arguments is independent of argument order."""
  with DtypeConversionTestEnv('all'):
    for v1 in _ALL_INPUT_TYPES:
      for v2 in _ALL_INPUT_TYPES:
        for v3 in _ALL_INPUT_TYPES:
          orderings = (
              (v1, v2, v3), (v1, v3, v2), (v2, v1, v3),
              (v2, v3, v1), (v3, v1, v2), (v3, v2, v1),
          )
          results = [flexible_dtypes.result_type(*o) for o in orderings]
          # Pairwise-adjacent equality implies all six results agree.
          self.assertAllEqual(results[:-1], results[1:])
if __name__ == '__main__':
tf_test.main()
ops.enable_eager_execution()
| DtypesUtilTest |
python | huggingface__transformers | tests/models/videomae/test_video_processing_videomae.py | {
"start": 3683,
"end": 6257
class ____(VideoProcessingTestMixin, unittest.TestCase):
    """Video-processing tests for VideoMAE, driven by VideoMAEVideoProcessingTester."""

    # Only the fast (torchvision-backed) processor exists for VideoMAE.
    fast_video_processing_class = VideoMAEVideoProcessor if is_torchvision_available() else None
    input_name = "pixel_values"

    def setUp(self):
        """Attach the shared tester fixture used by the mixin's tests."""
        super().setUp()
        self.video_processor_tester = VideoMAEVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        # Config kwargs used to instantiate processors in every test below.
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_properties(self):
        """The processor exposes the expected config attributes and input name."""
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        self.assertTrue(hasattr(video_processing, "do_resize"))
        self.assertTrue(hasattr(video_processing, "size"))
        self.assertTrue(hasattr(video_processing, "do_center_crop"))
        self.assertTrue(hasattr(video_processing, "center_crop"))
        self.assertTrue(hasattr(video_processing, "do_normalize"))
        self.assertTrue(hasattr(video_processing, "image_mean"))
        self.assertTrue(hasattr(video_processing, "image_std"))
        self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
        self.assertTrue(hasattr(video_processing, "model_input_names"))
        self.assertIn("pixel_values", video_processing.model_input_names)

    def test_pixel_value_identity(self):
        """
        Verify that VideoMAEVideoProcessor (TorchCodec-based) produces pixel tensors
        numerically similar to those from VideoMAEImageProcessor (PIL-based).
        Minor (<1%) differences are expected due to color conversion and interpolation.
        """
        video = self.video_processor_tester.prepare_video_inputs(return_tensors="np")
        video_processor = VideoMAEVideoProcessor(**self.video_processor_dict)
        image_processor = VideoMAEImageProcessor(**self.video_processor_dict)
        # Feed the same frames to both processors as PIL images.
        video_frames_np = video[0]
        video_frames_pil = [Image.fromarray(frame.astype("uint8")) for frame in video_frames_np]
        video_out = video_processor(video_frames_pil, return_tensors="pt")
        image_out = image_processor(video_frames_pil, return_tensors="pt")
        # Loose tolerances: decoders differ in YUV->RGB and interpolation.
        torch.testing.assert_close(
            video_out["pixel_values"],
            image_out["pixel_values"],
            rtol=5e-2,
            atol=1e-2,
            msg=(
                "Pixel values differ slightly between VideoMAEVideoProcessor "
                "and VideoMAEImageProcessor. "
                "Differences ≤1% are expected due to YUV→RGB conversion and "
                "interpolation behavior in different decoders."
            ),
        )
| VideoMAEVideoProcessingTest |
python | altair-viz__altair | tools/generate_schema_wrapper.py | {
"start": 13808,
"end": 14019
} | class ____(TypedDict, total=False):
bottom: float
left: float
right: float
top: float
Temporal: TypeAlias = Union[date, datetime]
'''
_ChannelType = Literal["field", "datum", "value"]
| PaddingKwds |
python | ansible__ansible | test/integration/targets/var_precedence/ansible-var-precedence-check.py | {
"start": 793,
"end": 2311
class ____(object):
    """Test helper that materializes an Ansible role directory tree.

    Depending on the flags set after construction, ``write_role`` writes
    ``roles/<name>/defaults/main.yml``, ``roles/<name>/vars/main.yml`` and a
    ``meta/main.yml`` listing dependencies, all under the module-level TESTDIR.
    """

    def __init__(self, name):
        self.name = name
        self.load = True
        self.dependencies = []  # role names to list in meta/main.yml
        self.defaults = False   # write defaults/main.yml when True
        self.vars = False       # write vars/main.yml when True
        self.tasks = []
        self.params = dict()

    def _subdir(self, *parts):
        """Create (if needed) and return a subdirectory of this role.

        Uses exist_ok=True instead of the check-then-create pattern, which
        is both shorter and immune to the isdir/makedirs race.
        """
        fpath = os.path.join(TESTDIR, 'roles', self.name, *parts)
        os.makedirs(fpath, exist_ok=True)
        return fpath

    def write_role(self):
        """Write this role's directory structure and YAML files to disk."""
        self._subdir()
        if self.defaults:
            # roles/x/defaults/main.yml
            fname = os.path.join(self._subdir('defaults'), 'main.yml')
            with open(fname, 'w') as f:
                f.write('findme: %s\n' % self.name)
        if self.vars:
            # roles/x/vars/main.yml
            fname = os.path.join(self._subdir('vars'), 'main.yml')
            with open(fname, 'w') as f:
                f.write('findme: %s\n' % self.name)
        if self.dependencies:
            # roles/x/meta/main.yml
            fname = os.path.join(self._subdir('meta'), 'main.yml')
            with open(fname, 'w') as f:
                f.write('dependencies:\n')
                for dep in self.dependencies:
                    f.write('- { role: %s }\n' % dep)
python | numba__numba | numba/tests/test_array_attr.py | {
"start": 11257,
"end": 11665
} | class ____(MemoryLeakMixin, TestCase):
"""Regression test for: https://github.com/numba/numba/issues/4775 """
def test(self):
@jitclass(dict())
class B(object):
def __init__(self):
pass
def foo(self, X):
X.flags
Z = B()
Z.foo(np.ones(4))
if __name__ == '__main__':
unittest.main()
| TestJitclassFlagsSegfault |
python | pyinstaller__pyinstaller | bootloader/waflib/Scripting.py | {
"start": 8914,
"end": 12470
} | class ____(Context.Context):
'''creates an archive containing the project source code'''
cmd = 'dist'
fun = 'dist'
algo = 'tar.bz2'
ext_algo = {}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name = self.get_arch_name()
try:
self.base_path
except AttributeError:
self.base_path = self.path
node = self.base_path.make_node(arch_name)
try:
node.delete()
except OSError:
pass
files = self.get_files()
if self.algo.startswith('tar.'):
tar = tarfile.open(node.abspath(), 'w:' + self.algo.replace('tar.', ''))
for x in files:
self.add_tar_file(x, tar)
tar.close()
elif self.algo == 'zip':
import zipfile
zip = zipfile.ZipFile(node.abspath(), 'w', compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name = self.get_base_name() + '/' + x.path_from(self.base_path)
zip.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz, tar.xz or zip')
try:
from hashlib import sha256
except ImportError:
digest = ''
else:
digest = ' (sha256=%r)' % sha256(node.read(flags='rb')).hexdigest()
Logs.info('New archive created: %s%s', self.arch_name, digest)
def get_tar_path(self, node):
return node.abspath()
def add_tar_file(self, x, tar):
p = self.get_tar_path(x)
tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path))
tinfo.uid = 0
tinfo.gid = 0
tinfo.uname = 'root'
tinfo.gname = 'root'
if os.path.isfile(p):
with open(p, 'rb') as f:
tar.addfile(tinfo, fileobj=f)
else:
tar.addfile(tinfo)
def get_tar_prefix(self):
try:
return self.tar_prefix
except AttributeError:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except AttributeError:
self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except AttributeError:
appname = getattr(Context.g_module, Context.APPNAME, 'noname')
version = getattr(Context.g_module, Context.VERSION, '1.0')
self.base_name = appname + '-' + version
return self.base_name
def get_excl(self):
try:
return self.excl
except AttributeError:
self.excl = Node.exclude_regs + ' **/waf-2.* **/.waf-2.* **/waf3-2.* **/.waf3-2.* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
if Context.out_dir:
nd = self.root.find_node(Context.out_dir)
if nd:
self.excl += ' ' + nd.path_from(self.base_path)
return self.excl
def get_files(self):
try:
files = self.files
except AttributeError:
files = self.base_path.ant_glob('**/*', excl=self.get_excl())
return files
def dist(ctx):
'''makes a tarball for redistributing the sources'''
pass
| Dist |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 131917,
"end": 132047
} | class ____(BaseModel, extra="forbid"):
start_resharding: "StartResharding" = Field(..., description="")
| StartReshardingOperation |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 426,
"end": 582
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("TEAM", "USER")
| ActorType |
python | doocs__leetcode | lcp/LCP 52. 二叉搜索树染色/Solution.py | {
"start": 164,
"end": 669
} | class ____:
def getNumber(self, root: Optional[TreeNode], ops: List[List[int]]) -> int:
def dfs(root):
if root is None:
return
sl.add(root.val)
dfs(root.left)
dfs(root.right)
sl = SortedList()
dfs(root)
ans = 0
for t, x, y in ops[::-1]:
i = sl.bisect_left(x)
while i < len(sl) and sl[i] <= y:
sl.pop(i)
ans += t == 1
return ans
| Solution |
python | numpy__numpy | benchmarks/benchmarks/bench_ma.py | {
"start": 6696,
"end": 7430
} | class ____(Benchmark):
param_names = ['margs', 'mset', 'msize']
params = [[0, (0, 0), (-1, 0)],
[17, np.ma.masked],
['small', 'big']]
def setup(self, margs, mset, msize):
xs = np.random.uniform(-1, 1, 6).reshape(2, 3)
m1 = [[True, False, False], [False, False, True]]
xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100)
maskx = xl > 0.8
self.nmxs = np.ma.array(xs, mask=m1)
self.nmxl = np.ma.array(xl, mask=maskx)
def time_methods_setitem(self, margs, mset, msize):
if msize == 'small':
mdat = self.nmxs
elif msize == 'big':
mdat = self.nmxl
mdat.__setitem__(margs, mset)
| MAMethodSetItem |
python | apache__airflow | helm-tests/tests/helm_tests/security/test_security_context.py | {
"start": 900,
"end": 4379
} | class ____:
"""Tests SC Backward Compatibility."""
def test_check_deployments_and_jobs(self):
docs = render_chart(
values={
"uid": 3000,
"gid": 30,
"webserver": {"defaultUser": {"enabled": True}},
"flower": {"enabled": True},
"airflowVersion": "2.2.0",
"executor": "CeleryKubernetesExecutor",
},
show_only=[
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
],
)
for doc in docs:
assert jmespath.search("spec.template.spec.securityContext.runAsUser", doc) == 3000
assert jmespath.search("spec.template.spec.securityContext.fsGroup", doc) == 30
def test_check_statsd_uid(self):
docs = render_chart(
values={"statsd": {"enabled": True, "uid": 3000}},
show_only=["templates/statsd/statsd-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.securityContext.runAsUser", docs[0]) == 3000
def test_check_pgbouncer_uid(self):
docs = render_chart(
values={"pgbouncer": {"enabled": True, "uid": 3000}},
show_only=["templates/pgbouncer/pgbouncer-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.securityContext.runAsUser", docs[0]) == 3000
def test_check_cleanup_job(self):
docs = render_chart(
values={"uid": 3000, "gid": 30, "cleanup": {"enabled": True}},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert (
jmespath.search("spec.jobTemplate.spec.template.spec.securityContext.runAsUser", docs[0]) == 3000
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.securityContext.fsGroup", docs[0]) == 30
def test_gitsync_sidecar_and_init_container(self):
docs = render_chart(
values={
"dags": {"gitSync": {"enabled": True, "uid": 3000}},
"airflowVersion": "1.10.15",
},
show_only=[
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
],
)
for doc in docs:
assert "git-sync" in [c["name"] for c in jmespath.search("spec.template.spec.containers", doc)]
assert "git-sync-init" in [
c["name"] for c in jmespath.search("spec.template.spec.initContainers", doc)
]
assert (
jmespath.search(
"spec.template.spec.initContainers[?name=='git-sync-init'].securityContext.runAsUser | [0]",
doc,
)
== 3000
)
assert (
jmespath.search(
"spec.template.spec.containers[?name=='git-sync'].securityContext.runAsUser | [0]",
doc,
)
== 3000
)
| TestSCBackwardsCompatibility |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/fetchers.py | {
"start": 6469,
"end": 7350
} | class ____(Iterator):
def __init__(self, data_fetcher: _DataLoaderIterDataFetcher) -> None:
self.data_fetcher = data_fetcher
@property
def done(self) -> bool:
return self.data_fetcher.done
@property
def fetched(self) -> int:
return self.data_fetcher.fetched
@property
def length(self) -> Optional[int]:
return self.data_fetcher.length
@override
def __next__(self) -> _ITERATOR_RETURN:
fetcher = self.data_fetcher
if fetcher.done:
raise StopIteration
batch, batch_idx, dataloader_idx = super(_DataLoaderIterDataFetcher, fetcher).__next__()
# save the state so the loops can access it
fetcher._batch = batch
fetcher._batch_idx = batch_idx
fetcher._dataloader_idx = dataloader_idx
return batch, batch_idx, dataloader_idx
| _DataFetcherWrapper |
python | gevent__gevent | src/greentest/3.10/test_signal.py | {
"start": 371,
"end": 1501
} | class ____(unittest.TestCase):
def test_enums(self):
for name in dir(signal):
sig = getattr(signal, name)
if name in {'SIG_DFL', 'SIG_IGN'}:
self.assertIsInstance(sig, signal.Handlers)
elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}:
self.assertIsInstance(sig, signal.Sigmasks)
elif name.startswith('SIG') and not name.startswith('SIG_'):
self.assertIsInstance(sig, signal.Signals)
elif name.startswith('CTRL_'):
self.assertIsInstance(sig, signal.Signals)
self.assertEqual(sys.platform, "win32")
def test_functions_module_attr(self):
# Issue #27718: If __all__ is not defined all non-builtin functions
# should have correct __module__ to be displayed by pydoc.
for name in dir(signal):
value = getattr(signal, name)
if inspect.isroutine(value) and not inspect.isbuiltin(value):
self.assertEqual(value.__module__, 'signal')
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
| GenericTests |
python | gevent__gevent | src/greentest/3.11/test_socket.py | {
"start": 12676,
"end": 13755
} | class ____(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipIf(WSL, 'VSOCK does not work on Microsoft WSL')
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2, # VMADDR_CID_HOST
"This test can only be run on a virtual guest.")
| ThreadedRDSSocketTest |
python | apache__airflow | airflow-core/src/airflow/models/tasklog.py | {
"start": 1076,
"end": 1856
} | class ____(Base):
"""
Changes to ``log_filename_template`` and ``elasticsearch_id``.
This table is automatically populated when Airflow starts up, to store the
config's value if it does not match the last row in the table.
"""
__tablename__ = "log_template"
id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
filename: Mapped[str] = mapped_column(Text, nullable=False)
elasticsearch_id: Mapped[str] = mapped_column(Text, nullable=False)
created_at: Mapped[datetime] = mapped_column(UtcDateTime, nullable=False, default=timezone.utcnow)
def __repr__(self) -> str:
attrs = ", ".join(f"{k}={getattr(self, k)}" for k in ("filename", "elasticsearch_id"))
return f"LogTemplate({attrs})"
| LogTemplate |
python | readthedocs__readthedocs.org | readthedocs/builds/tasks.py | {
"start": 18364,
"end": 24051
} | class ____:
webhook_timeout = 2
def __init__(self, version, build, event):
self.version = version
self.build = build
self.project = version.project
self.event = event
def send(self):
"""
Send email and webhook notifications for `project` about the `build`.
Email notifications are only send for build:failed events.
Webhooks choose to what events they subscribe to.
"""
if self.event == WebHookEvent.BUILD_FAILED:
email_addresses = self.project.emailhook_notifications.all().values_list(
"email", flat=True
)
for email in email_addresses:
try:
self.send_email(email)
except Exception:
log.exception(
"Failed to send email notification.",
email=email,
project_slug=self.project.slug,
version_slug=self.version.slug,
build_id=self.build.id,
)
webhooks = self.project.webhook_notifications.filter(events__name=self.event)
for webhook in webhooks:
try:
self.send_webhook(webhook)
except Exception:
log.exception(
"Failed to send webhook.",
webhook_id=webhook.id,
project_slug=self.project.slug,
version_slug=self.version.slug,
build_id=self.build.id,
)
def send_email(self, email):
"""Send email notifications for build failures."""
protocol = "http" if settings.DEBUG else "https"
context = {
"version": {
"verbose_name": self.version.verbose_name,
},
"project": {
"name": self.project.name,
},
"build": {
"pk": self.build.pk,
"error": self.build.error,
},
"build_url": "{}://{}{}".format(
protocol,
settings.PRODUCTION_DOMAIN,
self.build.get_absolute_url(),
),
"build_raw": "{}://{}{}".format(
protocol,
settings.PRODUCTION_DOMAIN,
reverse("build-detail", args=[self.build.pk, "txt"]),
),
"unsubscribe_url": "{}://{}{}".format(
protocol,
settings.PRODUCTION_DOMAIN,
reverse("projects_notifications", args=[self.project.slug]),
),
}
if self.build.commit:
title = _("Failed: {project[name]} ({commit})").format(
commit=self.build.commit[:8],
**context,
)
else:
title = _("Failed: {project[name]} ({version[verbose_name]})").format(**context)
log.info(
"Sending email notification.",
email=email,
project_slug=self.project.slug,
version_slug=self.version.slug,
build_id=self.build.id,
)
send_email(
email,
title,
template="projects/email/build_failed.txt",
template_html="projects/email/build_failed.html",
context=context,
)
def send_webhook(self, webhook):
"""
Send webhook notification.
The payload is signed using HMAC-SHA256,
for users to be able to verify the authenticity of the request.
Webhooks that don't have a payload,
are from the old implementation, for those we keep sending the
old default payload.
An HttpExchange object is created for each transaction.
"""
payload = webhook.get_payload(
version=self.version,
build=self.build,
event=self.event,
)
if not payload:
# Default payload from old webhooks.
payload = json.dumps(
{
"name": self.project.name,
"slug": self.project.slug,
"build": {
"id": self.build.id,
"commit": self.build.commit,
"state": self.build.state,
"success": self.build.success,
"date": self.build.date.strftime("%Y-%m-%d %H:%M:%S"),
},
}
)
headers = {
"content-type": "application/json",
"User-Agent": f"Read-the-Docs/{__version__} ({settings.PRODUCTION_DOMAIN})",
"X-RTD-Event": self.event,
}
if webhook.secret:
headers["X-Hub-Signature"] = webhook.sign_payload(payload)
try:
log.info(
"Sending webhook notification.",
webhook_id=webhook.id,
project_slug=self.project.slug,
version_slug=self.version.slug,
build_id=self.build.id,
)
response = requests.post(
webhook.url,
data=payload,
headers=headers,
timeout=self.webhook_timeout,
)
HttpExchange.objects.from_requests_exchange(
response=response,
related_object=webhook,
)
except Exception:
log.exception(
"Failed to POST to webhook url.",
webhook_id=webhook.id,
webhook_url=webhook.url,
)
| BuildNotificationSender |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/comms/commbase.py | {
"start": 16594,
"end": 17424
} | class ____:
"""Class to create `RemoteCall`s."""
def __init__(self, comms_wrapper, comm_id, callback, **settings):
# Avoid setting attributes
super(RemoteCallFactory, self).__setattr__(
'_comms_wrapper', comms_wrapper)
super(RemoteCallFactory, self).__setattr__('_comm_id', comm_id)
super(RemoteCallFactory, self).__setattr__('_callback', callback)
super(RemoteCallFactory, self).__setattr__('_settings', settings)
def __getattr__(self, name):
"""Get a call for a function named 'name'."""
return RemoteCall(name, self._comms_wrapper, self._comm_id,
self._callback, self._settings)
def __setattr__(self, name, value):
"""Set an attribute to the other side."""
raise NotImplementedError
| RemoteCallFactory |
python | django__django | django/contrib/postgres/aggregates/general.py | {
"start": 770,
"end": 822
} | class ____(Aggregate):
function = "BIT_XOR"
| BitXor |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 14253,
"end": 23781
} | class ____(Request):
"""
Create a new model not associated with a task
:param uri: URI for the model
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Case insensitive.
Should be identical to the framework of the task which created the model.
:type framework: str
:param design: Json[d] object representing the model design. Should be
identical to the network design of the task which created the model
:type design: dict
:param labels: Json object
:type labels: dict
:param ready: Indication if the model is final and can be used by other tasks
Default is false.
:type ready: bool
:param public: Create a public model Default is false.
:type public: bool
:param project: Project to which to model belongs
:type project: str
:param parent: Parent model
:type parent: str
:param task: Associated task ID
:type task: str
"""
_service = "models"
_action = "create"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"design": {
"additionalProperties": True,
"description": "Json[d] object representing the model design. Should be identical to the network design of the task which created the model",
"type": "object",
},
"framework": {
"description": "Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.",
"type": "string",
},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object",
"type": "object",
},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"parent": {"description": "Parent model", "type": "string"},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"public": {
"default": False,
"description": "Create a public model Default is false.",
"type": "boolean",
},
"ready": {
"default": False,
"description": "Indication if the model is final and can be used by other tasks Default is false.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"uri": {"description": "URI for the model", "type": "string"},
},
"required": ["uri", "name"],
"type": "object",
}
def __init__(
self,
uri: str,
name: str,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
ready: Optional[bool] = False,
public: Optional[bool] = False,
project: Optional[str] = None,
parent: Optional[str] = None,
task: Optional[str] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.ready = ready
self.public = public
self.project = project
self.parent = parent
self.task = task
@schema_property("uri")
def uri(self) -> str:
return self._property_uri
@uri.setter
def uri(self, value: str) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("public")
def public(self) -> Optional[bool]:
return self._property_public
@public.setter
def public(self, value: Optional[bool]) -> None:
if value is None:
self._property_public = None
return
self.assert_isinstance(value, "public", (bool,))
self._property_public = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| CreateRequest |
python | PrefectHQ__prefect | scripts/generate_cli_docs.py | {
"start": 737,
"end": 12246
} | class ____(TypedDict):
"""A dictionary representing a command context."""
indent: int
command_name: str
title: str
help: list[DocstringSection]
usage_pieces: list[str]
args: list[ArgumentDict]
# Storing "option" params. If you don't need them typed as click.Option,
# "Parameter" is enough to capture both options/arguments in general.
opts: list[Parameter]
examples: list[str]
epilog: str | None
commands: list[CommandSummaryDict]
subcommands: list[BuildDocsContext]
def get_help_text(docstring_object: str) -> list[DocstringSection]:
"""Get help text sections from a docstring.
Args:
docstring_object: The docstring to parse.
Returns:
list of docstring text sections.
"""
return [
section
for section in Docstring(inspect.cleandoc(docstring_object), lineno=1).parse(
"google",
warnings=False,
)
if section.kind == "text"
]
def get_examples(docstring_object: str) -> list[str]:
"""Get example strings from a docstring.
Args:
docstring_object: The docstring to parse.
Returns:
list of example strings.
"""
return [
text
for section in Docstring(inspect.cleandoc(docstring_object), lineno=1).parse(
"google",
warnings=False,
)
if isinstance(section, DocstringSectionExamples)
for _, text in section.value
]
def build_docs_context(
*,
obj: Command,
ctx: click.Context,
indent: int = 0,
name: str = "",
call_prefix: str = "",
) -> BuildDocsContext:
"""Build a command context for documentation generation.
Args:
obj: The Click command object to document
ctx: The Click context
indent: Indentation level for nested commands
name: Override name for the command
call_prefix: Prefix to add to command name
Returns:
A BuildDocsContext object
"""
# Command name can be empty, so ensure we always end up with a string
if call_prefix:
command_name = f"{call_prefix} {obj.name or ''}".strip()
else:
command_name = name if name else (obj.name or "")
title: str = f"`{command_name}`" if command_name else "CLI"
usage_pieces: list[str] = obj.collect_usage_pieces(ctx)
args_list: list[ArgumentDict] = []
opts_list: list[Parameter] = []
# Collect arguments vs. options (skip the built-in help option)
for param in obj.get_params(ctx):
# If the parameter is an Option and its opts include '--help', skip it.
if isinstance(param, click.Option) and "--help" in param.opts:
continue
help_record = param.get_help_record(ctx) # Optional[tuple[str, str]]
if help_record is not None:
param_name, param_help = help_record
if getattr(param, "param_type_name", "") == "argument":
args_list.append({"name": param_name, "help": param_help})
elif getattr(param, "param_type_name", "") == "option":
opts_list.append(param)
commands_list: list[CommandSummaryDict] = []
subcommands: list[BuildDocsContext] = []
# Only MultiCommand objects have subcommands
if isinstance(obj, MultiCommand):
all_commands: list[str] = obj.list_commands(ctx)
# Filter out help commands and blocked commands
blocked_commands = {"help", "--help", "deploy", "cloud"}
filtered_commands: list[str] = [
cmd for cmd in all_commands if cmd not in blocked_commands
]
for command in filtered_commands:
command_obj = obj.get_command(ctx, command)
assert command_obj, f"Command {command} not found in {obj.name}" # noqa: S101
# Prepare a short "summary" for listing
cmd_name = command_obj.name or ""
cmd_help = command_obj.get_short_help_str()
commands_list.append({"name": cmd_name, "help": cmd_help})
# Recursively build docs for each subcommand
for command in filtered_commands:
command_obj = obj.get_command(ctx, command)
assert command_obj # noqa: S101
sub_ctx = build_docs_context(
obj=command_obj,
ctx=ctx,
indent=indent + 1,
name="", # Let the function pick the name from command_obj
call_prefix=command_name,
)
subcommands.append(sub_ctx)
return BuildDocsContext(
indent=indent,
command_name=command_name,
title=title,
help=get_help_text(obj.help or ""),
examples=get_examples(obj.help or ""),
usage_pieces=usage_pieces,
args=args_list,
opts=opts_list,
epilog=obj.epilog, # Optional[str]
commands=commands_list,
subcommands=subcommands,
)
def escape_mdx(text: str) -> str:
"""Escape characters that commonly break MDX (Mintlify).
- Replace angle brackets < >
- Replace curly braces { }
- Escape backticks, pipes, and arrow functions
- Escape dollar signs to avoid template interpolation.
"""
import re
if not text:
return ""
# First, let's preserve code blocks by temporarily replacing them
# This regex matches triple backtick code blocks
code_blocks = []
code_block_pattern = r"```[\s\S]*?```"
def store_code_block(match):
code_blocks.append(match.group(0))
return f"__CODE_BLOCK_{len(code_blocks) - 1}__"
text = re.sub(code_block_pattern, store_code_block, text)
# Also preserve inline code (single backticks)
inline_code = []
inline_code_pattern = r"`[^`]+`"
def store_inline_code(match):
inline_code.append(match.group(0))
return f"__INLINE_CODE_{len(inline_code) - 1}__"
text = re.sub(inline_code_pattern, store_inline_code, text)
# Escape < and >
text = text.replace("<", "<").replace(">", ">")
# Escape { and }
text = text.replace("{", "{").replace("}", "}")
# Escape backticks (only those not in code blocks)
text = text.replace("`", "\\`")
# Escape pipes (especially in tables)
text = text.replace("|", "\\|")
# Escape => arrow
text = re.sub(r"(?<!\w)=>(?!\w)", "\\=>", text)
# Escape $
text = text.replace("$", "\\$")
# Escape ! at start of lines
text = re.sub(r"(?m)^!", "\\!", text)
# Restore code blocks
for i, block in enumerate(code_blocks):
text = text.replace(f"__CODE_BLOCK_{i}__", block)
# Restore inline code
for i, code in enumerate(inline_code):
text = text.replace(f"__INLINE_CODE_{i}__", code)
return text
def write_command_docs(
command_context: BuildDocsContext,
env: Environment,
output_dir: str,
) -> None:
"""Render a single command (and do *not* recurse in the template).
Then recurse here in Python for each subcommand.
Args:
command_context: Context containing command documentation
env: Jinja environment for rendering templates
output_dir: Directory to write output files
"""
# 1. Render the Jinja template for this command only
template = env.get_template("docs_template.jinja")
rendered = template.render(command=command_context)
# 2. Create a filename. For example, use the "command_name" field:
# Convert any spaces or slashes to underscores, etc.
command_name_clean = command_context["command_name"].replace(" ", "_")
if not command_name_clean:
command_name_clean = "cli_root" # fallback if top-level name is empty
filename = f"{command_name_clean}.mdx"
filepath = Path(output_dir) / filename
# 3. Write out to disk
Path(output_dir).mkdir(parents=True, exist_ok=True)
with Path.open(filepath, mode="w", encoding="utf-8") as f:
f.write(rendered)
# 4. Recursively render subcommands in the same manner
for sub_ctx in command_context["subcommands"]:
write_command_docs(sub_ctx, env, output_dir)
def render_command_and_subcommands(
    cmd_context: "BuildDocsContext",
    env: "Environment",
) -> str:
    """Render one command and, recursively, all of its subcommands.

    Args:
        cmd_context: Context dict describing the command (expects the
            ``subcommands`` key).
        env: Jinja environment used to load ``docs_template.jinja``.

    Returns:
        The rendered documentation for this command followed by each
        subcommand's documentation (depth-first), separated by blank lines.
    """
    # 1) Render this command itself.
    template = env.get_template("docs_template.jinja")
    sections = [template.render(command=cmd_context)]

    # 2) Recursively render each child subcommand.
    sections.extend(
        render_command_and_subcommands(sub_ctx, env)
        for sub_ctx in cmd_context["subcommands"]
    )

    # join avoids the quadratic cost of repeated "+=" string concatenation.
    return "\n\n".join(sections)
def write_subcommand_docs(
    top_level_sub: "BuildDocsContext",
    env: "Environment",
    output_dir: str,
) -> None:
    """Render one *top-level* command and all nested subcommands into a single MDX file.

    Args:
        top_level_sub: Context dict for the top-level command (expects the
            ``command_name`` and ``subcommands`` keys).
        env: Jinja environment used for rendering.
        output_dir: Directory the ``.mdx`` file is written into.
    """
    content = render_command_and_subcommands(top_level_sub, env)

    # "command_name" may be a multi-word invocation such as "prefect artifact";
    # use only the last token for the filename (e.g. "artifact.mdx").
    name_parts = top_level_sub["command_name"].split()
    file_stub = name_parts[-1] if name_parts else "cli-root"

    # Path.write_text replaces the awkward unbound Path.open(...) call.
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    (output_path / f"{file_stub}.mdx").write_text(content, encoding="utf-8")
def get_docs_for_click(
    *,
    obj: click.Command,
    ctx: click.Context,
    indent: int = 0,
    name: str = "",
    call_prefix: str = "",
) -> str:
    """Build the top-level docs context & generate one MDX file per subcommand.

    Args:
        obj: The Click command object to document
        ctx: The Click context
        indent: Indentation level for nested commands
        name: Override name for the command
        call_prefix: Prefix to add to command name

    Returns:
        Empty string (files are written to disk as a side effect)
    """
    docs_context = build_docs_context(
        obj=obj,
        ctx=ctx,
        indent=indent,
        name=name,
        call_prefix=call_prefix,
    )
    # Create the Jinja environment. Templates are resolved relative to the
    # current working directory, so this script must be run from the repo root.
    env = Environment(
        loader=FileSystemLoader("./scripts/templates"),
        autoescape=select_autoescape(["html", "xml"]),
    )
    env.filters["escape_mdx"] = escape_mdx
    # Where to store the generated MDX files
    cli_dir = "./docs/v3/api-ref/cli"
    # The top-level context is for "prefect" itself,
    # so docs_context["subcommands"] are each top-level subcommand.
    # One MDX file is emitted per top-level subcommand (the root command
    # itself gets no file of its own).
    for sub_ctx in docs_context["subcommands"]:
        write_subcommand_docs(sub_ctx, env, cli_dir)
    return ""
if __name__ == "__main__":
    # Import inside a catch_warnings block so any warning-filter changes made
    # during import do not leak into this process's global filter state.
    # NOTE(review): no simplefilter is set here, so warnings emitted by the
    # import are NOT suppressed — confirm that is the intent.
    with warnings.catch_warnings():
        from prefect.cli.root import app

    # Convert a Typer app to a Click command object.
    click_obj: click.Command = typer.main.get_command(app)
    # Create a click.Context for it.
    main_ctx: click.Context = click.Context(click_obj)
    # Generate one MDX file per top-level subcommand.
    get_docs_for_click(obj=click_obj, ctx=main_ctx)
| BuildDocsContext |
python | xlwings__xlwings | xlwings/conversion/standard.py | {
"start": 5142,
"end": 5309
} | class ____:
def __call__(self, c):
c.value = [
[e[i] for e in c.value] for i in range(len(c.value[0]) if c.value else 0)
]
| TransposeStage |
python | joke2k__faker | tests/providers/test_python.py | {
"start": 12034,
"end": 19665
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker()
Faker.seed(0)
def test_pydecimal(self):
result = self.fake.pydecimal()
self.assertIsInstance(result, decimal.Decimal)
def test_left_digits(self):
expected_left_digits = 10
result = self.fake.pydecimal(left_digits=expected_left_digits)
left_digits = len(str(abs(int(result))))
self.assertGreaterEqual(expected_left_digits, left_digits)
def test_left_digits_can_be_zero(self):
expected_left_digits = 0
result = self.fake.pydecimal(left_digits=expected_left_digits)
left_digits = int(result)
self.assertEqual(expected_left_digits, left_digits)
def test_right_digits(self):
expected_right_digits = 10
result = self.fake.pydecimal(right_digits=expected_right_digits)
right_digits = len(str(result).split(".")[1])
self.assertGreaterEqual(expected_right_digits, right_digits)
def test_positive(self):
result = self.fake.pydecimal(positive=True)
self.assertGreater(result, 0)
abs_result = -result if result < 0 else result # abs() result returns scientific notation
self.assertEqual(result, abs_result)
def test_min_value(self):
min_values = (0, 10, -1000, 1000, 999999)
for min_value in min_values:
result = self.fake.pydecimal(min_value=min_value)
self.assertGreaterEqual(result, min_value)
def test_min_value_always_returns_a_decimal(self):
min_values = (0, 10, -1000, 1000, 999999)
for min_value in min_values:
result = self.fake.pydecimal(min_value=min_value)
self.assertIsInstance(result, decimal.Decimal)
def test_min_value_and_left_digits(self):
"""
Combining the min_value and left_digits keyword arguments produces
numbers that obey both of those constraints.
"""
result = self.fake.pydecimal(left_digits=1, min_value=0)
self.assertLess(result, 10)
self.assertGreaterEqual(result, 0)
def test_max_value(self):
max_values = (0, 10, -1000, 1000, 999999)
for max_value in max_values:
result = self.fake.pydecimal(max_value=max_value)
self.assertLessEqual(result, max_value)
def test_max_value_always_returns_a_decimal(self):
max_values = (0, 10, -1000, 1000, 999999)
for max_value in max_values:
result = self.fake.pydecimal(max_value=max_value)
self.assertIsInstance(result, decimal.Decimal)
def test_max_value_zero_and_left_digits(self):
"""
Combining the max_value and left_digits keyword arguments produces
numbers that obey both of those constraints.
"""
result = self.fake.pydecimal(left_digits=2, max_value=0)
self.assertLessEqual(result, 0)
self.assertGreater(result, -100)
def test_max_value_should_be_greater_than_min_value(self):
"""
An exception should be raised if min_value is greater than max_value
"""
expected_message = "Min value cannot be greater than max value"
with self.assertRaises(ValueError) as raises:
self.fake.pydecimal(min_value=100, max_value=0)
message = str(raises.exception)
self.assertEqual(message, expected_message)
def test_max_value_and_positive(self):
"""
Combining the max_value and positive keyword arguments produces
numbers that obey both of those constraints.
"""
result = self.fake.pydecimal(positive=True, max_value=100)
self.assertLessEqual(result, 100)
self.assertGreater(result, 0)
def test_max_and_min_value_negative(self):
"""
Combining the max_value and min_value keyword arguments with
negative values for each produces numbers that obey both of
those constraints.
"""
result = self.fake.pydecimal(max_value=-100, min_value=-200)
self.assertLessEqual(result, -100)
self.assertGreaterEqual(result, -200)
def test_positive_and_min_value_incompatible(self):
"""
An exception should be raised if positive=True is set, but
a negative min_value is provided.
"""
expected_message = "Cannot combine positive=True with negative or zero min_value"
with self.assertRaises(ValueError) as raises:
self.fake.pydecimal(min_value=-100, positive=True)
message = str(raises.exception)
self.assertEqual(message, expected_message)
def test_positive_doesnt_return_zero(self):
"""
Choose the right_digits and max_value so it's guaranteed to return zero,
then watch as it doesn't because positive=True
"""
result = self.fake.pydecimal(positive=True, right_digits=0, max_value=1)
self.assertGreater(result, 0)
def test_min_value_zero_doesnt_return_negative(self):
Faker.seed("1")
result = self.fake.pydecimal(left_digits=3, right_digits=2, min_value=0, max_value=999)
self.assertGreater(result, 0)
def test_min_value_one_hundred_doesnt_return_negative(self):
Faker.seed("1")
result = self.fake.pydecimal(left_digits=3, right_digits=2, min_value=100, max_value=999)
self.assertGreater(result, 100)
def test_min_value_minus_one_doesnt_return_positive(self):
Faker.seed("5")
result = self.fake.pydecimal(left_digits=3, right_digits=2, min_value=-999, max_value=0)
self.assertLess(result, 0)
def test_min_value_minus_one_hundred_doesnt_return_positive(self):
Faker.seed("5")
result = self.fake.pydecimal(left_digits=3, right_digits=2, min_value=-999, max_value=-100)
self.assertLess(result, -100)
def test_min_value_10_pow_1000_return_greater_number(self):
Faker.seed("2")
result = self.fake.pydecimal(min_value=10**1000)
self.assertGreater(result, 10**1000)
def test_min_value_and_max_value_have_different_signs_return_evenly_distributed_values(self):
result = []
boundary_value = 10
for _ in range(1000):
result.append(self.fake.pydecimal(min_value=-boundary_value, max_value=boundary_value, right_digits=0))
self.assertEqual(len(Counter(result)), 2 * boundary_value + 1)
def test_min_value_and_max_value_negative_return_evenly_distributed_values(self):
result = []
min_value = -60
max_value = -50
for _ in range(1000):
result.append(self.fake.pydecimal(min_value=min_value, max_value=max_value, right_digits=0))
self.assertGreater(len(Counter(result)), max_value - min_value)
def test_min_value_and_max_value_positive_return_evenly_distributed_values(self):
result = []
min_value = 50
max_value = 60
for _ in range(1000):
result.append(self.fake.pydecimal(min_value=min_value, max_value=max_value, right_digits=0))
self.assertGreater(len(Counter(result)), max_value - min_value)
def test_min_value_float_returns_correct_digit_number(self):
Faker.seed("6")
result = self.fake.pydecimal(left_digits=1, right_digits=1, min_value=0.2, max_value=0.3)
self.assertEqual(decimal.Decimal("0.2"), result)
def test_max_value_float_returns_correct_digit_number(self):
Faker.seed("3")
result = self.fake.pydecimal(left_digits=1, right_digits=1, min_value=0.2, max_value=0.3)
self.assertEqual(decimal.Decimal("0.3"), result)
| TestPydecimal |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_shape_base_.py | {
"start": 20296,
"end": 20940
} | class ____(TestCase):
"""Only testing for integer splits."""
def test_non_iterable(self):
assert_raises(ValueError, vsplit, 1, 1)
def test_0D_array(self):
a = np.array(1)
assert_raises(ValueError, vsplit, a, 2)
def test_1D_array(self):
a = np.array([1, 2, 3, 4])
try:
vsplit(a, 2)
assert_(0)
except ValueError:
pass
def test_2D_array(self):
a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
res = vsplit(a, 2)
desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
compare_results(res, desired)
| TestVsplit |
python | falconry__falcon | tests/test_uri_templates.py | {
"start": 2399,
"end": 17199
} | class ____:
def __init__(self):
self.get_called = False
self.post_called = False
self.put_called = False
def on_get(self, req, resp, collection_id, item_id):
self.collection_id = collection_id
self.item_id = item_id
self.get_called = True
def on_post(self, req, resp, collection_id, item_id):
self.collection_id = collection_id
self.item_id = item_id
self.post_called = True
def on_put(self, req, resp, collection_id, item_id):
self.collection_id = collection_id
self.item_id = item_id
self.put_called = True
def on_get_collection(self, req, resp, collection_id):
self.collection_id = collection_id
self.get_called = True
def on_post_collection(self, req, resp, collection_id):
self.collection_id = collection_id
self.post_called = True
def on_put_collection(self, req, resp, collection_id):
self.collection_id = collection_id
self.put_called = True
@pytest.fixture
def resource():
return testing.SimpleTestResource()
@pytest.fixture
def client(asgi, util):
return testing.TestClient(util.create_app(asgi))
def test_root_path(client, resource):
client.app.add_route('/', resource)
client.simulate_get('/')
assert resource.called
def test_no_vars(client, resource):
client.app.add_route('/hello/world', resource)
client.simulate_get('/hello/world')
assert resource.called
def test_special_chars(client, resource):
client.app.add_route('/hello/world.json', resource)
client.app.add_route('/hello(world)', resource)
client.simulate_get('/hello/world_json')
assert not resource.called
client.simulate_get('/helloworld')
assert not resource.called
client.simulate_get('/hello/world.json')
assert resource.called
client.simulate_get('/hello(world)')
assert resource.called
@pytest.mark.parametrize(
'field_name',
[
'id',
'id123',
'widget_id',
],
)
def test_single(client, resource, field_name):
template = f'/widgets/{{{field_name}}}'
client.app.add_route(template, resource)
client.simulate_get('/widgets/123')
assert resource.called
assert resource.captured_kwargs[field_name] == '123'
def test_single_path_segment(client):
id_resource = IDResource()
client.app.add_route('/thing-{id}', id_resource)
client.simulate_get('/thing-foo')
assert id_resource.id == 'foo'
@pytest.mark.parametrize(
'uri_template,',
[
'/{id:int}',
'/{id:int(3)}',
'/{id:int(min=123)}',
'/{id:int(min=123, max=123)}',
],
)
def test_int_converter(client, uri_template):
resource1 = IDResource()
client.app.add_route(uri_template, resource1)
result = client.simulate_get('/123')
assert result.status_code == 200
assert resource1.called
assert resource1.id == 123
assert resource1.req.path == '/123'
@pytest.mark.parametrize('id_value', [2, 2.1, 1.9])
@pytest.mark.parametrize(
'uri_template,',
[
'/{id:float}',
'/{id:float(1)}',
'/{id:float(min=1.9)}',
'/{id:float(min=1.8, max=3)}',
],
)
def test_float_converter(client, uri_template, id_value):
resource1 = IDResource()
client.app.add_route(uri_template, resource1)
result = client.simulate_get(f'/{id_value}')
assert result.status_code == 200
assert resource1.called
assert resource1.id == id_value
assert resource1.req.path == f'/{id_value}'
@pytest.mark.parametrize('value', ['nan', '-inf', 'inf'])
def test_float_converter_non_finite_allowed(value, client):
resource1 = IDResource()
client.app.add_route('/{id:float(finite=False)}', resource1)
result = client.simulate_get('/' + value)
assert result.status_code == 200
assert resource1.called
assert not math.isfinite(resource1.id)
def test_float_converter_non_finite_disallowed(client):
resource1 = IDResource()
client.app.add_route('/{id:float}', resource1)
result = client.simulate_get('/NaN')
assert result.status_code == 404
assert not resource1.called
@pytest.mark.parametrize(
'uri_template,',
[
'/{id:int(2)}',
'/{id:int(min=124)}',
'/{id:int(num_digits=3, max=100)}',
],
)
def test_int_converter_rejections(client, uri_template):
resource1 = IDResource()
client.app.add_route(uri_template, resource1)
result = client.simulate_get('/123')
assert result.status_code == 404
assert not resource1.called
@pytest.mark.parametrize(
'uri_template, path, dt_expected',
[
(
'/{start_year:int}-to-{timestamp:dt}',
'/1961-to-1969-07-21T02:56:00Z',
datetime(1969, 7, 21, 2, 56, 0, tzinfo=timezone.utc),
),
(
'/{start_year:int}-to-{timestamp:dt("%Y-%m-%d")}',
'/1961-to-1969-07-21',
datetime(1969, 7, 21),
),
(
'/{start_year:int}/{timestamp:dt("%Y-%m-%d %H:%M")}',
'/1961/1969-07-21 14:30',
datetime(1969, 7, 21, 14, 30),
),
('/{start_year:int}-to-{timestamp:dt("%Y-%m")}', '/1961-to-1969-07-21', None),
],
)
def test_datetime_converter(client, resource, uri_template, path, dt_expected):
client.app.add_route(uri_template, resource)
result = client.simulate_get(path)
if dt_expected is None:
assert result.status_code == 404
assert not resource.called
else:
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs['start_year'] == 1961
assert resource.captured_kwargs['timestamp'] == dt_expected
@pytest.mark.parametrize(
'uri_template, path, expected',
_as_params(
(
'/widgets/{widget_id:uuid}',
'/widgets/' + _TEST_UUID_STR,
{'widget_id': _TEST_UUID},
),
(
'/widgets/{widget_id:uuid}/orders',
'/widgets/' + _TEST_UUID_STR_SANS_HYPHENS + '/orders',
{'widget_id': _TEST_UUID},
),
(
'/versions/diff/{left:uuid()}...{right:uuid()}',
f'/versions/diff/{_TEST_UUID_STR}...{_TEST_UUID_STR_2}',
{
'left': _TEST_UUID,
'right': _TEST_UUID_2,
},
),
(
'/versions/diff/{left:uuid}...{right:uuid()}',
f'/versions/diff/{_TEST_UUID_STR}...{_TEST_UUID_STR_2}',
{
'left': _TEST_UUID,
'right': _TEST_UUID_2,
},
),
(
'/versions/diff/{left:uuid()}...{right:uuid}',
f'/versions/diff/{_TEST_UUID_STR}...{_TEST_UUID_STR_2}',
{
'left': _TEST_UUID,
'right': _TEST_UUID_2,
},
),
(
'/widgets/{widget_id:uuid}/orders',
'/widgets/' + _TEST_UUID_STR_SANS_HYPHENS[:-1] + '/orders',
None,
),
prefix='uuid_converter',
),
)
def test_uuid_converter(client, resource, uri_template, path, expected):
client.app.add_route(uri_template, resource)
result = client.simulate_get(path)
if expected is None:
assert result.status_code == 404
assert not resource.called
else:
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs == expected
def test_uuid_converter_complex_segment(client, resource):
client.app.add_route('/pages/{first:uuid}...{last:uuid}', resource)
first_uuid = uuid.uuid4()
last_uuid = uuid.uuid4()
result = client.simulate_get(f'/pages/{first_uuid}...{last_uuid}')
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs['first'] == first_uuid
assert resource.captured_kwargs['last'] == last_uuid
@pytest.mark.parametrize(
'uri_template, path, expected',
[
('/{food:spam}', '/something', {'food': 'spam!'}),
(
'/{food:spam(")")}:{food_too:spam("()")}',
'/bacon:eggs',
{'food': 'spam!', 'food_too': 'spam!'},
),
(
'/({food:spam()}){food_too:spam("()")}',
'/(bacon)eggs',
{'food': 'spam!', 'food_too': 'spam!'},
),
],
)
def test_converter_custom(client, resource, uri_template, path, expected):
class SpamConverter:
def __init__(self, useless_text=None):
pass
def convert(self, fragment):
return 'spam!'
client.app.router_options.converters['spam'] = SpamConverter
client.app.add_route(uri_template, resource)
result = client.simulate_get(path)
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs == expected
def test_single_trailing_slash(client):
resource1 = IDResource()
client.app.add_route('/1/{id}/', resource1)
assert client.simulate_get('/1/123').status_code == 404
result = client.simulate_get('/1/123/')
assert result.status == falcon.HTTP_200
assert resource1.called
assert resource1.id == '123'
assert resource1.req.path == '/1/123/'
resource2 = IDResource()
client.app.add_route('/2/{id}/', resource2)
result = client.simulate_get('/2/123')
assert result.status == falcon.HTTP_404
assert not resource2.called
assert resource2.id is None
resource3 = IDResource()
client.app.add_route('/3/{id}', resource3)
client.app.req_options.strip_url_path_trailing_slash = True
result = client.simulate_get('/3/123/')
assert result.status == falcon.HTTP_200
assert resource3.called
assert resource3.id == '123'
assert resource3.req.path == '/3/123'
resource4 = IDResource()
client.app.add_route('/4/{id}', resource4)
client.app.req_options.strip_url_path_trailing_slash = False
result = client.simulate_get('/4/123/')
assert result.status == falcon.HTTP_404
assert not resource4.called
assert resource4.id is None
def test_multiple(client):
resource = NameResource()
client.app.add_route('/messages/{id}/names/{name}', resource)
test_id = 'bfb54d43-219b-4336-a623-6172f920592e'
test_name = '758e3922-dd6d-4007-a589-50fba0789365'
path = '/messages/' + test_id + '/names/' + test_name
client.simulate_get(path)
assert resource.called
assert resource.id == test_id
assert resource.name == test_name
@pytest.mark.parametrize(
'uri_template',
[
'//',
'//begin',
'/end//',
'/in//side',
],
)
def test_empty_path_component(client, resource, uri_template):
with pytest.raises(ValueError):
client.app.add_route(uri_template, resource)
@pytest.mark.parametrize(
'uri_template',
[
'',
'no',
'no/leading_slash',
],
)
def test_relative_path(client, resource, uri_template):
with pytest.raises(ValueError):
client.app.add_route(uri_template, resource)
@pytest.mark.parametrize('reverse', [True, False])
def test_same_level_complex_var(client, reverse):
file_resource = FileResource()
details_resource = FileDetailsResource()
routes = [
('/files/{file_id}', file_resource),
('/files/{file_id}.{ext}', details_resource),
]
if reverse:
routes.reverse()
for uri_template, resource in routes:
client.app.add_route(uri_template, resource)
file_id_1 = 'bc6b201d-b449-4290-a061-8eeb9f7b1450'
file_id_2 = '33b7f34c-6ee6-40e6-89a3-742a69b59de0'
ext = 'a4581b95-bc36-4c08-a3c2-23ba266abdf2'
path_1 = '/files/' + file_id_1
path_2 = '/files/' + file_id_2 + '.' + ext
client.simulate_get(path_1)
assert file_resource.called
assert file_resource.file_id == file_id_1
client.simulate_get(path_2)
assert details_resource.called
assert details_resource.file_id == file_id_2
assert details_resource.ext == ext
def test_adding_suffix_routes(client):
resource_with_suffix_routes = ResourceWithSuffixRoutes()
client.app.add_route(
'/collections/{collection_id}/items/{item_id}', resource_with_suffix_routes
)
client.app.add_route(
'/collections/{collection_id}/items',
resource_with_suffix_routes,
suffix='collection',
)
# GET
client.simulate_get('/collections/123/items/456')
assert resource_with_suffix_routes.collection_id == '123'
assert resource_with_suffix_routes.item_id == '456'
assert resource_with_suffix_routes.get_called
client.simulate_get('/collections/foo/items')
assert resource_with_suffix_routes.collection_id == 'foo'
# POST
client.simulate_post('/collections/foo234/items/foo456')
assert resource_with_suffix_routes.collection_id == 'foo234'
assert resource_with_suffix_routes.item_id == 'foo456'
assert resource_with_suffix_routes.post_called
client.simulate_post('/collections/foo123/items')
assert resource_with_suffix_routes.collection_id == 'foo123'
# PUT
client.simulate_put('/collections/foo345/items/foo567')
assert resource_with_suffix_routes.collection_id == 'foo345'
assert resource_with_suffix_routes.item_id == 'foo567'
assert resource_with_suffix_routes.put_called
client.simulate_put('/collections/foo321/items')
assert resource_with_suffix_routes.collection_id == 'foo321'
@pytest.mark.parametrize('reverse', [True, False])
def test_with_and_without_trailing_slash(client, reverse):
routes = [
('/kitchen', KitchenSinkResource()),
('/kitchen/', KitchenSinkResource()),
('/kitchen/{item}', KitchenSinkResource()),
('/kitchen/{item}/', KitchenSinkResource()),
('/kitchen/sink', KitchenSinkResource()),
('/kitchen/sink/', KitchenSinkResource()),
]
if reverse:
routes.reverse()
for route in routes:
client.app.add_route(*route)
for uri_template, resource in routes:
item = None
if '{item}' in uri_template:
item = 'kettle' if uri_template.endswith('/') else 'teapot'
resp = client.simulate_get(uri_template.replace('{item}', item or ''))
assert resp.status_code == 200
assert resource.call_count == 1
assert resource.kwargs.get('item') == item
assert resource.uri_template == uri_template
def test_custom_error_on_suffix_route_not_found(client):
resource_with_suffix_routes = ResourceWithSuffixRoutes()
with pytest.raises(SuffixedMethodNotFoundError):
client.app.add_route(
'/collections/{collection_id}/items',
resource_with_suffix_routes,
suffix='bad-alt',
)
| ResourceWithSuffixRoutes |
python | pyparsing__pyparsing | examples/eval_arith.py | {
"start": 2317,
"end": 6298
} | class ____:
"Class to evaluate comparison expressions"
opMap = {
"<": lambda a, b: a < b,
"<=": lambda a, b: a <= b,
">": lambda a, b: a > b,
">=": lambda a, b: a >= b,
"!=": lambda a, b: a != b,
"=": lambda a, b: a == b,
"LT": lambda a, b: a < b,
"LE": lambda a, b: a <= b,
"GT": lambda a, b: a > b,
"GE": lambda a, b: a >= b,
"NE": lambda a, b: a != b,
"EQ": lambda a, b: a == b,
"<>": lambda a, b: a != b,
}
def __init__(self, tokens):
self.value = tokens[0]
def eval(self):
val1 = self.value[0].eval()
for op, val in operatorOperands(self.value[1:]):
fn = EvalComparisonOp.opMap[op]
val2 = val.eval()
if not fn(val1, val2):
break
val1 = val2
else:
return True
return False
# define the parser
integer = Word(nums)
real = Combine(Word(nums) + "." + Word(nums))
variable = Word(alphas, exact=1)
operand = real | integer | variable
signop = one_of("+ -")
multop = one_of("* /")
plusop = one_of("+ -")
expop = Literal("**")
# use parse actions to attach EvalXXX constructors to sub-expressions
operand.set_parse_action(EvalConstant)
arith_expr = infix_notation(
operand,
[
(signop, 1, OpAssoc.RIGHT, EvalSignOp),
(expop, 2, OpAssoc.LEFT, EvalPowerOp),
(multop, 2, OpAssoc.LEFT, EvalMultOp),
(plusop, 2, OpAssoc.LEFT, EvalAddOp),
],
)
comparisonop = one_of("< <= > >= != = <> LT GT LE GE EQ NE")
comp_expr = infix_notation(
arith_expr,
[
(comparisonop, 2, OpAssoc.LEFT, EvalComparisonOp),
],
)
# sample expressions posted on comp.lang.python, asking for advice
# in safely evaluating them
rules = [
"( A - B ) = 0",
"( B - C + B ) = 0",
"(A + B + C + D + E + F + G + H + I) = J",
"(A + B + C + D + E + F + G + H) = I",
"(A + B + C + D + E + F) = G",
"(A + B + C + D + E) = (F + G + H + I + J)",
"(A + B + C + D + E) = (F + G + H + I)",
"(A + B + C + D + E) = F",
"(A + B + C + D) = (E + F + G + H)",
"(A + B + C) = D",
"(A + B + C) = (D + E + F)",
"(A + B) = (C + D + E + F)",
"(A + B) = (C + D)",
"(A + B) = (C - D + E - F - G + H + I + J)",
"(A + B) = C",
"(A + B) = 0",
"(A+B+C+D+E) = (F+G+H+I+J)",
"(A+B+C+D) = (E+F+G+H)",
"(A+B+C+D)=(E+F+G+H)",
"(A+B+C)=(D+E+F)",
"(A+B)=(C+D)",
"(A+B)=C",
"(A-B)=C",
"(A/(B+C))",
"(B/(C+D))",
"(G + H) = I",
"-0.99 LE ((A+B+C)-(D+E+F+G)) LE 0.99",
"-0.99 LE (A-(B+C)) LE 0.99",
"-1000.00 LE A LE 0.00",
"-5000.00 LE A LE 0.00",
"A < B",
"A < 7000",
"A = -(B)",
"A = C",
"A = 0",
"A GT 0",
"A GT 0.00",
"A GT 7.00",
"A LE B",
"A LT -1000.00",
"A LT -5000",
"A LT 0",
"G=(B+C+D)",
"A=B",
"I = (G + H)",
"0.00 LE A LE 4.00",
"4.00 LT A LE 7.00",
"0.00 LE A LE 4.00 LE E > D",
"2**2**(A+3)",
]
vars_ = {
"A": 0,
"B": 1.1,
"C": 2.2,
"D": 3.3,
"E": 4.4,
"F": 5.5,
"G": 6.6,
"H": 7.7,
"I": 8.8,
"J": 9.9,
}
# define tests from given rules
tests = []
for t in rules:
t_orig = t
t = t.replace("=", "==")
t = t.replace("EQ", "==")
t = t.replace("LE", "<=")
t = t.replace("GT", ">")
t = t.replace("LT", "<")
t = t.replace("GE", ">=")
t = t.replace("LE", "<=")
t = t.replace("NE", "!=")
t = t.replace("<>", "!=")
tests.append((t_orig, eval(t, vars_)))
# copy vars_ to EvalConstant lookup dict
EvalConstant.vars_ = vars_
failed = 0
for test, expected in tests:
ret = comp_expr.parse_string(test)[0]
parsedvalue = ret.eval()
print(test, expected, parsedvalue)
if abs(parsedvalue - expected) > 1e-6:
print("<<< FAIL")
failed += 1
else:
print("")
print("")
if failed:
raise Exception("could not parse")
| EvalComparisonOp |
python | pydantic__pydantic | pydantic/v1/schema.py | {
"start": 47621,
"end": 47801
} | class ____(Exception):
"""
Utility exception used to exclude fields from schema.
"""
def __init__(self, message: str) -> None:
self.message = message
| SkipField |
python | apache__airflow | providers/apache/beam/tests/unit/apache/beam/operators/test_beam.py | {
"start": 16262,
"end": 24403
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, default_options, pipeline_options):
self.default_op_kwargs = {
"task_id": TASK_ID,
"jar": JAR_FILE,
"job_class": JOB_CLASS,
"default_pipeline_options": copy.deepcopy(default_options),
"pipeline_options": copy.deepcopy(pipeline_options),
}
def test_init(self, default_options, pipeline_options):
op = BeamRunJavaPipelineOperator(**self.default_op_kwargs, dataflow_config={})
# Should not change into the operator constructor, it might define in templated_fields
assert op.default_pipeline_options == default_options
assert op.pipeline_options == pipeline_options
assert op.dataflow_config == {}
assert op.job_class == JOB_CLASS
assert op.jar == JAR_FILE
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_exec_direct_runner(self, gcs_hook, beam_hook_mock, default_options, pipeline_options):
"""Test BeamHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = beam_hook_mock.return_value.start_java_pipeline
gcs_provide_file = gcs_hook.return_value.provide_file
op = BeamRunJavaPipelineOperator(**self.default_op_kwargs)
op.execute({})
beam_hook_mock.assert_called_once_with(runner=DEFAULT_RUNNER)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
variables={**default_options, **pipeline_options},
jar=gcs_provide_file.return_value.__enter__.return_value.name,
job_class=JOB_CLASS,
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_direct_runner_no_op_extra_links(
self, gcs_hook, beam_hook_mock, default_options, pipeline_options
):
"""Test there is no operator_extra_links when running pipeline with direct runner type."""
start_java_hook = beam_hook_mock.return_value.start_java_pipeline
op = BeamRunJavaPipelineOperator(**self.default_op_kwargs)
op.execute({})
beam_hook_mock.assert_called_once_with(runner=DEFAULT_RUNNER)
start_java_hook.assert_called_once()
assert not op.operator_extra_links
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_exec_dataflow_runner(self, gcs_hook, dataflow_hook_mock, beam_hook_mock, persist_link_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_dataflow.
"""
dataflow_config = DataflowConfiguration(impersonation_chain="test@impersonation.com")
op = BeamRunJavaPipelineOperator(
**self.default_op_kwargs, dataflow_config=dataflow_config, runner="DataflowRunner"
)
gcs_provide_file = gcs_hook.return_value.provide_file
dataflow_hook_mock.return_value.is_job_dataflow_running.return_value = False
op.execute({})
job_name = dataflow_hook_mock.build_dataflow_job_name.return_value
dataflow_hook_mock.assert_called_once_with(
gcp_conn_id=dataflow_config.gcp_conn_id,
poll_sleep=dataflow_config.poll_sleep,
impersonation_chain=dataflow_config.impersonation_chain,
drain_pipeline=dataflow_config.drain_pipeline,
cancel_timeout=dataflow_config.cancel_timeout,
wait_until_finished=dataflow_config.wait_until_finished,
)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
expected_options = {
"project": dataflow_hook_mock.return_value.project_id,
"jobName": job_name,
"stagingLocation": "gs://test/staging",
"region": "us-central1",
"labels": {"foo": "bar"},
"output": "gs://test/output",
"impersonateServiceAccount": TEST_IMPERSONATION_ACCOUNT,
}
persist_link_mock.assert_called_once_with(
context={},
region="us-central1",
job_id=None,
project_id=dataflow_hook_mock.return_value.project_id,
)
beam_hook_mock.return_value.start_java_pipeline.assert_called_once_with(
variables=expected_options,
jar=gcs_provide_file.return_value.__enter__.return_value.name,
job_class=JOB_CLASS,
process_line_callback=mock.ANY,
is_dataflow_job_id_exist_callback=mock.ANY,
)
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_exec_dataflow_runner__no_dataflow_job_name(
self, gcs_hook, dataflow_hook_mock, beam_hook_mock, persist_link_mock
):
"""Test that the task_id is passed as the Dataflow job name if not set in dataflow_config."""
dataflow_config = DataflowConfiguration(impersonation_chain="test@impersonation.com")
op = BeamRunJavaPipelineOperator(
**self.default_op_kwargs, dataflow_config=dataflow_config, runner="DataflowRunner"
)
dataflow_hook_mock.return_value.is_job_dataflow_running.return_value = False
op.execute({})
assert op.dataflow_config.job_name == op.task_id
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
def test_on_kill_dataflow_runner(self, dataflow_hook_mock, _, __, ___):
dataflow_hook_mock.return_value.is_job_dataflow_running.return_value = False
dataflow_cancel_job = dataflow_hook_mock.return_value.cancel_job
op = BeamRunJavaPipelineOperator(**self.default_op_kwargs, runner="DataflowRunner")
op.execute({})
op.dataflow_job_id = JOB_ID
op.on_kill()
dataflow_cancel_job.assert_called_once_with(
job_id=JOB_ID, project_id=op.dataflow_config.project_id, location=op.dataflow_config.location
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_on_kill_direct_runner(self, _, dataflow_mock, __):
dataflow_cancel_job = dataflow_mock.return_value.cancel_job
op = BeamRunJavaPipelineOperator(**self.default_op_kwargs)
op.execute(None)
op.on_kill()
dataflow_cancel_job.assert_not_called()
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_dataflow_streaming_not_stuck(
self, gcs_hook, dataflow_hook_mock, beam_hook_mock, persist_link_mock
):
"""Check that start java streaming pipeline does not enter infinite loop,
when streaming pipeline with the same prefix is already running and check_is_running=True"""
dataflow_config = DataflowConfiguration()
op_kwargs = copy.deepcopy(self.default_op_kwargs)
op_kwargs["pipeline_options"]["streaming"] = True
dataflow_hook_mock.return_value.is_job_dataflow_running.return_value = True
start_java_mock = beam_hook_mock.return_value.start_java_pipeline
op = BeamRunJavaPipelineOperator(
**op_kwargs, dataflow_config=dataflow_config, runner="DataflowRunner"
)
res = op.execute({})
start_java_mock.assert_not_called()
assert res == {"dataflow_job_id": None}
| TestBeamRunJavaPipelineOperator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels15.py | {
"start": 315,
"end": 1332
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels15.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "doughnut"})
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {"value": 1, "leader_lines": 1, "position": "best_fit"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | wandb__wandb | wandb/sdk/data_types/graph.py | {
"start": 1717,
"end": 6188
} | class ____(WBValue):
"""Node used in `Graph`."""
def __init__(
self,
id=None,
name=None,
class_name=None,
size=None,
parameters=None,
output_shape=None,
is_output=None,
num_parameters=None,
node=None,
):
self._attributes = {"name": None}
self.in_edges = {} # indexed by source node id
self.out_edges = {} # indexed by dest node id
# optional object (e.g. PyTorch Parameter or Module) that this Node represents
self.obj = None
if node is not None:
self._attributes.update(node._attributes)
del self._attributes["id"]
self.obj = node.obj
if id is not None:
self.id = id
if name is not None:
self.name = name
if class_name is not None:
self.class_name = class_name
if size is not None:
self.size = size
if parameters is not None:
self.parameters = parameters
if output_shape is not None:
self.output_shape = output_shape
if is_output is not None:
self.is_output = is_output
if num_parameters is not None:
self.num_parameters = num_parameters
def to_json(self, run=None):
return self._attributes
def __repr__(self):
return repr(self._attributes)
@property
def id(self):
"""Must be unique in the graph."""
return self._attributes.get("id")
@id.setter
def id(self, val):
self._attributes["id"] = val
return val
@property
def name(self):
"""Usually the type of layer or sublayer."""
return self._attributes.get("name")
@name.setter
def name(self, val):
self._attributes["name"] = val
return val
@property
def class_name(self):
"""Usually the type of layer or sublayer."""
return self._attributes.get("class_name")
@class_name.setter
def class_name(self, val):
self._attributes["class_name"] = val
return val
@property
def functions(self):
return self._attributes.get("functions", [])
@functions.setter
def functions(self, val):
self._attributes["functions"] = val
return val
@property
def parameters(self):
return self._attributes.get("parameters", [])
@parameters.setter
def parameters(self, val):
self._attributes["parameters"] = val
return val
@property
def size(self):
return self._attributes.get("size")
@size.setter
def size(self, val):
"""Tensor size."""
self._attributes["size"] = tuple(val)
return val
@property
def output_shape(self):
return self._attributes.get("output_shape")
@output_shape.setter
def output_shape(self, val):
"""Tensor output_shape."""
self._attributes["output_shape"] = val
return val
@property
def is_output(self):
return self._attributes.get("is_output")
@is_output.setter
def is_output(self, val):
"""Tensor is_output."""
self._attributes["is_output"] = val
return val
@property
def num_parameters(self):
return self._attributes.get("num_parameters")
@num_parameters.setter
def num_parameters(self, val):
"""Tensor num_parameters."""
self._attributes["num_parameters"] = val
return val
@property
def child_parameters(self):
return self._attributes.get("child_parameters")
@child_parameters.setter
def child_parameters(self, val):
"""Tensor child_parameters."""
self._attributes["child_parameters"] = val
return val
@property
def is_constant(self):
return self._attributes.get("is_constant")
@is_constant.setter
def is_constant(self, val):
"""Tensor is_constant."""
self._attributes["is_constant"] = val
return val
@classmethod
def from_keras(cls, layer):
node = cls()
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = ["multiple"]
node.id = layer.name
node.name = layer.name
node.class_name = layer.__class__.__name__
node.output_shape = output_shape
node.num_parameters = layer.count_params()
return node
| Node |
python | readthedocs__readthedocs.org | readthedocs/filetreediff/dataclasses.py | {
"start": 356,
"end": 477
} | class ____:
"""The build associated with a file tree manifest."""
id: int
@dataclass(slots=True)
| FileTreeDiffBuild |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py | {
"start": 7569,
"end": 8005
} | class ____(LlamaPreTrainedModel):
_can_compile_fullgraph = False
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, HunYuanMoEV1Experts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
| HunYuanMoEV1PreTrainedModel |
python | protocolbuffers__protobuf | python/google/protobuf/json_format.py | {
"start": 1705,
"end": 1779
} | class ____(Exception):
"""Top-level module error for json_format."""
| Error |
python | automl__auto-sklearn | autosklearn/metalearning/optimizers/metalearn_optimizer/metalearner.py | {
"start": 222,
"end": 5566
} | class ____(object):
def __init__(
self,
dataset_name,
configuration_space,
meta_base,
logger,
distance="l1",
seed=None,
use_features=None,
distance_kwargs=None,
):
self.dataset_name = dataset_name
self.configuration_space = configuration_space
self.meta_base = meta_base
self.distance = distance
self.seed = seed
self.use_features = use_features
self.distance_kwargs = distance_kwargs
self.kND = None # For caching, makes things faster...
self.logger = logger
def metalearning_suggest_all(self, exclude_double_configurations=True):
"""Return a list of the best hyperparameters of neighboring datasets"""
# TODO check if _learn was called before!
neighbors = self._learn(exclude_double_configurations)
hp_list = []
for neighbor in neighbors:
try:
configuration = self.meta_base.get_configuration_from_algorithm_index(
neighbor[2]
)
self.logger.info("%s %s %s" % (neighbor[0], neighbor[1], configuration))
except (KeyError):
self.logger.warning("Configuration %s not found" % neighbor[2])
continue
hp_list.append(configuration)
return hp_list
def metalearning_suggest(self, history):
"""Suggest the next promosing hyperparameters which were not yet evaluated"""
# TODO test the object in the history!
neighbors = self._learn()
# Iterate over all datasets which are sorted ascending by distance
history_with_indices = []
for run in history:
history_with_indices.append(
self.meta_base.get_algorithm_index_from_configuration(run)
)
for idx, neighbor in enumerate(neighbors):
already_evaluated = False
# Check if that dataset was already evaluated
for run in history_with_indices:
# If so, return to the outer loop
if neighbor[2] == run:
already_evaluated = True
break
if not already_evaluated:
self.logger.info(
"Nearest dataset with hyperparameters of best value "
"not evaluated yet is %s with a distance of %f"
% (neighbor[0], neighbor[1])
)
return self.meta_base.get_configuration_from_algorithm_index(
neighbor[2]
)
raise StopIteration("No more values available.")
def _learn(self, exclude_double_configurations=True):
dataset_metafeatures, all_other_metafeatures = self._split_metafeature_array()
# Remove metafeatures which could not be calculated for the target
# dataset
keep = []
for idx in dataset_metafeatures.index:
if np.isfinite(dataset_metafeatures.loc[idx]):
keep.append(idx)
dataset_metafeatures = dataset_metafeatures.loc[keep]
all_other_metafeatures = all_other_metafeatures.loc[:, keep]
# Do mean imputation of all other metafeatures
all_other_metafeatures = all_other_metafeatures.fillna(
all_other_metafeatures.mean()
)
if self.kND is None:
# In case that we learn our distance function, get_value the parameters for
# the random forest
if self.distance_kwargs:
rf_params = ast.literal_eval(self.distance_kwargs)
else:
rf_params = None
# To keep the distance the same in every iteration, we create a new
# random state
random_state = sklearn.utils.check_random_state(self.seed)
kND = KNearestDatasets(
metric=self.distance,
random_state=random_state,
logger=self.logger,
metric_params=rf_params,
)
runs = dict()
# TODO move this code to the metabase
for task_id in all_other_metafeatures.index:
try:
runs[task_id] = self.meta_base.get_runs(task_id)
except KeyError:
# TODO should I really except this?
self.logger.info("Could not find runs for instance %s" % task_id)
runs[task_id] = pd.Series([], name=task_id, dtype=np.float64)
runs = pd.DataFrame(runs)
kND.fit(all_other_metafeatures, runs)
self.kND = kND
return self.kND.kBestSuggestions(
dataset_metafeatures,
k=-1,
exclude_double_configurations=exclude_double_configurations,
)
def _split_metafeature_array(self):
dataset_metafeatures = self.meta_base.get_metafeatures(
self.dataset_name, self.use_features
)
all_other_datasets = self.meta_base.get_all_dataset_names()
all_other_datasets.remove(self.dataset_name)
all_other_metafeatures = self.meta_base.get_metafeatures(
all_other_datasets, self.use_features
)
return dataset_metafeatures, all_other_metafeatures
| MetaLearningOptimizer |
python | ansible__ansible | test/lib/ansible_test/_internal/util.py | {
"start": 30298,
"end": 30414
} | class ____(SystemExit):
"""Error raised when the test timeout has been reached or exceeded."""
| TimeoutExpiredError |
python | kubernetes-client__python | kubernetes/base/config/exec_provider_test.py | {
"start": 778,
"end": 8142
} | class ____(unittest.TestCase):
def setUp(self):
self.input_ok = ConfigNode('test', {
'command': 'aws-iam-authenticator',
'args': ['token', '-i', 'dummy'],
'apiVersion': 'client.authentication.k8s.io/v1beta1',
'env': None
})
self.input_with_cluster = ConfigNode('test', {
'command': 'aws-iam-authenticator',
'args': ['token', '-i', 'dummy'],
'apiVersion': 'client.authentication.k8s.io/v1beta1',
'provideClusterInfo': True,
'env': None
})
self.output_ok = """
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"kind": "ExecCredential",
"status": {
"token": "dummy"
}
}
"""
def test_missing_input_keys(self):
exec_configs = [ConfigNode('test1', {}),
ConfigNode('test2', {'command': ''}),
ConfigNode('test3', {'apiVersion': ''})]
for exec_config in exec_configs:
with self.assertRaises(ConfigException) as context:
ExecProvider(exec_config, None)
self.assertIn('exec: malformed request. missing key',
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_error_code_returned(self, mock):
instance = mock.return_value
instance.wait.return_value = 1
instance.communicate.return_value = ('', '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn('exec: process returned %d' %
instance.wait.return_value, context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_nonjson_output_returned(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = ('', '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn('exec: failed to decode process output',
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_missing_output_keys(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
outputs = [
"""
{
"kind": "ExecCredential",
"status": {
"token": "dummy"
}
}
""", """
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"status": {
"token": "dummy"
}
}
""", """
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"kind": "ExecCredential"
}
"""
]
for output in outputs:
instance.communicate.return_value = (output, '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn('exec: malformed response. missing key',
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_mismatched_api_version(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
wrong_api_version = 'client.authentication.k8s.io/v1'
output = """
{
"apiVersion": "%s",
"kind": "ExecCredential",
"status": {
"token": "dummy"
}
}
""" % wrong_api_version
instance.communicate.return_value = (output, '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn(
'exec: plugin api version %s does not match' %
wrong_api_version,
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_ok_01(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
ep = ExecProvider(self.input_ok, None)
result = ep.run()
self.assertTrue(isinstance(result, dict))
self.assertTrue('token' in result)
@mock.patch('subprocess.Popen')
def test_run_in_dir(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
ep = ExecProvider(self.input_ok, '/some/directory')
ep.run()
self.assertEqual(mock.call_args[1]['cwd'], '/some/directory')
@mock.patch('subprocess.Popen')
def test_ok_no_console_attached(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
mock_stdout = unittest.mock.patch(
'sys.stdout', new=None) # Simulate detached console
with mock_stdout:
ep = ExecProvider(self.input_ok, None)
result = ep.run()
self.assertTrue(isinstance(result, dict))
self.assertTrue('token' in result)
@mock.patch('subprocess.Popen')
def test_with_cluster_info(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
ep = ExecProvider(self.input_with_cluster, None, ConfigNode("cluster", {'server': 'name.company.com'}))
result = ep.run()
self.assertTrue(isinstance(result, dict))
self.assertTrue('token' in result)
obj = json.loads(mock.call_args.kwargs['env']['KUBERNETES_EXEC_INFO'])
self.assertEqual(obj['spec']['cluster']['server'], 'name.company.com')
@mock.patch("subprocess.Popen")
def test_with_cluster_info_from_exec_extension(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, "")
ep = ExecProvider(
self.input_with_cluster,
None,
ConfigNode(
"cluster",
{
"server": "name.company.com",
"extensions": [
{
"name": "client.authentication.k8s.io/exec",
"extension": {
"namespace": "myproject",
"name": "mycluster",
},
},
],
},
),
)
result = ep.run()
self.assertTrue(isinstance(result, dict))
self.assertTrue("token" in result)
obj = json.loads(mock.call_args.kwargs["env"]["KUBERNETES_EXEC_INFO"])
self.assertEqual(obj["spec"]["cluster"]["server"], "name.company.com")
self.assertEqual(obj["spec"]["cluster"]["config"]["namespace"], "myproject")
self.assertEqual(obj["spec"]["cluster"]["config"]["name"], "mycluster")
if __name__ == '__main__':
unittest.main()
| ExecProviderTest |
python | davidhalter__jedi | test/completion/pep0484_generic_mismatches.py | {
"start": 187,
"end": 285
} | class ____(Generic[T]):
def __init__(self, val: T) -> None:
self.val = val
| CustomGeneric |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 14632,
"end": 20755
} | class ____:
def test_empty_html_charfield_with_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(default='happy')
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'message': 'happy'}
def test_empty_html_charfield_without_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_blank=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_without_default_not_required(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_blank=True, required=False)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_integerfield(self):
class TestSerializer(serializers.Serializer):
message = serializers.IntegerField(default=123)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': 123}
def test_empty_html_uuidfield_with_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.UUIDField(default=uuid.uuid4)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert list(serializer.validated_data) == ['message']
def test_empty_html_uuidfield_with_optional(self):
class TestSerializer(serializers.Serializer):
message = serializers.UUIDField(required=False)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert list(serializer.validated_data) == []
def test_empty_html_charfield_allow_null(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': None}
def test_empty_html_datefield_allow_null(self):
class TestSerializer(serializers.Serializer):
expiry = serializers.DateField(allow_null=True)
serializer = TestSerializer(data=QueryDict('expiry='))
assert serializer.is_valid()
assert serializer.validated_data == {'expiry': None}
def test_empty_html_charfield_allow_null_allow_blank(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True, allow_blank=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_required_false(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(required=False)
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {}
def test_querydict_list_input(self):
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(child=serializers.IntegerField())
serializer = TestSerializer(data=QueryDict('scores=1&scores=3'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': [1, 3]}
def test_querydict_list_input_only_one_input(self):
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(child=serializers.IntegerField())
serializer = TestSerializer(data=QueryDict('scores=1&'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': [1]}
def test_querydict_list_input_no_values_uses_default(self):
"""
When there are no values passed in, and default is set
The field should return the default value
"""
class TestSerializer(serializers.Serializer):
a = serializers.IntegerField(required=True)
scores = serializers.ListField(default=lambda: [1, 3])
serializer = TestSerializer(data=QueryDict('a=1&'))
assert serializer.is_valid()
assert serializer.validated_data == {'a': 1, 'scores': [1, 3]}
def test_querydict_list_input_supports_indexed_keys(self):
"""
When data is passed in the format `scores[0]=1&scores[1]=3`
The field should return the correct list, ignoring the default
"""
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(default=lambda: [1, 3])
serializer = TestSerializer(data=QueryDict("scores[0]=5&scores[1]=6"))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': ['5', '6']}
def test_querydict_list_input_no_values_no_default_and_not_required(self):
"""
When there are no keys passed, there is no default, and required=False
The field should be skipped
"""
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(required=False)
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {}
def test_querydict_list_input_posts_key_but_no_values(self):
"""
When there are no keys passed, there is no default, and required=False
The field should return an array of 1 item, blank
"""
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(required=False)
serializer = TestSerializer(data=QueryDict('scores=&'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': ['']}
| TestHTMLInput |
python | ipython__ipython | IPython/terminal/shortcuts/auto_suggest.py | {
"start": 906,
"end": 6035
} | class ____(Processor):
"""
Append the auto suggestion to lines other than the last (appending to the
last line is natively supported by the prompt toolkit).
This has a private `_debug` attribute that can be set to True to display
debug information as virtual suggestion on the end of any line. You can do
so with:
>>> from IPython.terminal.shortcuts.auto_suggest import AppendAutoSuggestionInAnyLine
>>> AppendAutoSuggestionInAnyLine._debug = True
"""
_debug: ClassVar[bool] = False
def __init__(self, style: str = "class:auto-suggestion") -> None:
self.style = style
def apply_transformation(self, ti: TransformationInput) -> Transformation:
"""
Apply transformation to the line that is currently being edited.
This is a variation of the original implementation in prompt toolkit
that allows to not only append suggestions to any line, but also to show
multi-line suggestions.
As transformation are applied on a line-by-line basis; we need to trick
a bit, and elide any line that is after the line we are currently
editing, until we run out of completions. We cannot shift the existing
lines
There are multiple cases to handle:
The completions ends before the end of the buffer:
We can resume showing the normal line, and say that some code may
be hidden.
The completions ends at the end of the buffer
We can just say that some code may be hidden.
And separately:
The completions ends beyond the end of the buffer
We need to both say that some code may be hidden, and that some
lines are not shown.
"""
last_line_number = ti.document.line_count - 1
is_last_line = ti.lineno == last_line_number
noop = lambda text: Transformation(
fragments=ti.fragments + [(self.style, " " + text if self._debug else "")]
)
if ti.document.line_count == 1:
return noop("noop:oneline")
if ti.document.cursor_position_row == last_line_number and is_last_line:
# prompt toolkit already appends something; just leave it be
return noop("noop:last line and cursor")
# first everything before the current line is unchanged.
if ti.lineno < ti.document.cursor_position_row:
return noop("noop:before cursor")
buffer = ti.buffer_control.buffer
if not buffer.suggestion or not ti.document.is_cursor_at_the_end_of_line:
return noop("noop:not eol")
delta = ti.lineno - ti.document.cursor_position_row
suggestions = buffer.suggestion.text.splitlines()
if len(suggestions) == 0:
return noop("noop: no suggestions")
if prompt_toolkit.VERSION < (3, 0, 49):
if len(suggestions) > 1 and prompt_toolkit.VERSION < (3, 0, 49):
if ti.lineno == ti.document.cursor_position_row:
return Transformation(
fragments=ti.fragments
+ [
(
"red",
"(Cannot show multiline suggestion; requires prompt_toolkit > 3.0.49)",
)
]
)
else:
return Transformation(fragments=ti.fragments)
elif len(suggestions) == 1:
if ti.lineno == ti.document.cursor_position_row:
return Transformation(
fragments=ti.fragments + [(self.style, suggestions[0])]
)
return Transformation(fragments=ti.fragments)
if delta == 0:
suggestion = suggestions[0]
return Transformation(fragments=ti.fragments + [(self.style, suggestion)])
if is_last_line:
if delta < len(suggestions):
suggestion = f"… rest of suggestion ({len(suggestions) - delta} lines) and code hidden"
return Transformation([(self.style, suggestion)])
n_elided = len(suggestions)
for i in range(len(suggestions)):
ll = ti.get_line(last_line_number - i)
el = "".join(l[1] for l in ll).strip()
if el:
break
else:
n_elided -= 1
if n_elided:
return Transformation([(self.style, f"… {n_elided} line(s) hidden")])
else:
return Transformation(
ti.get_line(last_line_number - len(suggestions) + 1)
+ ([(self.style, "shift-last-line")] if self._debug else [])
)
elif delta < len(suggestions):
suggestion = suggestions[delta]
return Transformation([(self.style, suggestion)])
else:
shift = ti.lineno - len(suggestions) + 1
return Transformation(ti.get_line(shift))
| AppendAutoSuggestionInAnyLine |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/control_flow.py | {
"start": 1599,
"end": 14345
} | class ____(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
def visit_Lambda(self, node):
with self.state[_Function] as fn:
fn.scope = anno.getanno(node, anno.Static.SCOPE)
return self.generic_visit(node)
def visit_FunctionDef(self, node):
with self.state[_Function] as fn:
fn.scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
return self.generic_visit(node)
def _create_nonlocal_declarations(self, vars_):
vars_ = set(vars_)
results = []
global_vars = self.state[_Function].scope.globals & vars_
if global_vars:
results.append(gast.Global([str(v) for v in global_vars]))
nonlocal_vars = [
v for v in vars_ if not v.is_composite() and v not in global_vars]
if nonlocal_vars:
results.append(gast.Nonlocal([str(v) for v in nonlocal_vars]))
return results
def _create_state_functions(
self, block_vars, nonlocal_declarations, getter_name, setter_name):
if not block_vars:
template = """
def getter_name():
return ()
def setter_name(block_vars):
pass
"""
return templates.replace(
template, getter_name=getter_name, setter_name=setter_name)
guarded_block_vars = []
for v in block_vars:
if v.is_simple():
guarded_block_vars.append(v)
else:
guarded_block_vars.append(
templates.replace_as_expression(
'ag__.ldu(lambda: var_, name)',
var_=v,
name=gast.Constant(str(v), kind=None)))
template = """
def getter_name():
return guarded_state_vars,
def setter_name(vars_):
nonlocal_declarations
state_vars, = vars_
"""
return templates.replace(
template,
nonlocal_declarations=nonlocal_declarations,
getter_name=getter_name,
guarded_state_vars=guarded_block_vars,
setter_name=setter_name,
state_vars=tuple(block_vars))
def _create_loop_options(self, node):
if not anno.hasanno(node, anno.Basic.DIRECTIVES):
return gast.Dict([], [])
loop_directives = anno.getanno(node, anno.Basic.DIRECTIVES)
if directives.set_loop_options not in loop_directives:
return gast.Dict([], [])
opts_dict = loop_directives[directives.set_loop_options]
str_keys, values = zip(*opts_dict.items())
keys = [gast.Constant(s, kind=None) for s in str_keys]
values = list(values) # ast and gast don't play well with tuples.
return gast.Dict(keys, values)
def _create_undefined_assigns(self, undefined_symbols):
assignments = []
for s in undefined_symbols:
template = '''
var = ag__.Undefined(symbol_name)
'''
assignments += templates.replace(
template,
var=s,
symbol_name=gast.Constant(s.ssf(), kind=None))
return assignments
def _get_block_basic_vars(self, modified, live_in, live_out):
nonlocals = self.state[_Function].scope.nonlocals
basic_scope_vars = []
for s in modified:
if s.is_composite():
# TODO(mdan): Raise an error when this happens for a TF scope.
continue
# Variables not live into or out of the scope are considered local to the
# scope.
if s in live_in or s in live_out or s in nonlocals:
basic_scope_vars.append(s)
continue
return frozenset(basic_scope_vars)
def _get_block_composite_vars(self, modified, live_in):
# The scope variables corresponding to composite symbols (e.g. `self.x`).
composite_scope_vars = []
for s in modified:
if not s.is_composite():
continue
# Mutations made to objects created inside the scope will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the scope.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the scope, but x is not.
#
# Note that some parents might not be symbols - for example, in x['foo'],
# 'foo' is a parent, but it's a literal, not a symbol. We don't check the
# liveness of literals.
support_set_symbols = tuple(
sss for sss in s.support_set if sss.is_symbol())
if not all(sss in live_in for sss in support_set_symbols):
continue
composite_scope_vars.append(s)
return frozenset(composite_scope_vars)
def _get_block_vars(self, node, modified):
"""Determines the variables affected inside a control flow statement."""
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
fn_scope = self.state[_Function].scope
basic_scope_vars = self._get_block_basic_vars(
modified,
live_in,
live_out)
composite_scope_vars = self._get_block_composite_vars(modified, live_in)
scope_vars = tuple(basic_scope_vars | composite_scope_vars)
# Variables that are modified inside the scope, but not defined
# before entering it. Only simple variables must be defined. The
# composite ones will be implicitly checked at runtime.
possibly_undefined = (
modified - defined_in - fn_scope.globals - fn_scope.nonlocals)
undefined = tuple(v for v in possibly_undefined if not v.is_composite())
# Variables that are modified inside the scope, and depend on values outside
# it.
input_only = basic_scope_vars & live_in - live_out
# Place the outputs first, then sort lexicographically.
scope_vars = sorted(scope_vars, key=lambda v: (v in input_only, v))
nouts = len(scope_vars) - len(input_only)
return scope_vars, undefined, nouts
def visit_If(self, node):
node = self.generic_visit(node)
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
cond_vars, undefined, nouts = self._get_block_vars(
node, body_scope.bound | orelse_scope.bound)
undefined_assigns = self._create_undefined_assigns(undefined)
nonlocal_declarations = self._create_nonlocal_declarations(cond_vars)
reserved = body_scope.referenced | orelse_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
state_functions = self._create_state_functions(
cond_vars, nonlocal_declarations, state_getter_name, state_setter_name)
orelse_body = node.orelse
if not orelse_body:
orelse_body = [gast.Pass()]
template = """
state_functions
def body_name():
nonlocal_declarations
body
def orelse_name():
nonlocal_declarations
orelse
undefined_assigns
ag__.if_stmt(
test,
body_name,
orelse_name,
state_getter_name,
state_setter_name,
(symbol_names,),
nouts)
"""
new_nodes = templates.replace(
template,
body=node.body,
body_name=self.ctx.namer.new_symbol('if_body', reserved),
orelse=orelse_body,
orelse_name=self.ctx.namer.new_symbol('else_body', reserved),
nonlocal_declarations=nonlocal_declarations,
nouts=gast.Constant(nouts, kind=None),
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
symbol_names=tuple(gast.Constant(str(s), kind=None) for s in cond_vars),
test=node.test,
undefined_assigns=undefined_assigns)
origin_info.copy_origin(node, new_nodes[-1])
return new_nodes
def visit_While(self, node):
node = self.generic_visit(node)
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
loop_vars, undefined, _ = self._get_block_vars(node, body_scope.bound)
undefined_assigns = self._create_undefined_assigns(undefined)
nonlocal_declarations = self._create_nonlocal_declarations(loop_vars)
reserved = body_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
state_functions = self._create_state_functions(
loop_vars, nonlocal_declarations, state_getter_name, state_setter_name)
opts = self._create_loop_options(node)
template = """
state_functions
def body_name():
nonlocal_declarations
body
def test_name():
return test
undefined_assigns
ag__.while_stmt(
test_name,
body_name,
state_getter_name,
state_setter_name,
(symbol_names,),
opts)
"""
new_nodes = templates.replace(
template,
body=node.body,
body_name=self.ctx.namer.new_symbol('loop_body', reserved),
nonlocal_declarations=nonlocal_declarations,
opts=opts,
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
symbol_names=tuple(gast.Constant(str(s), kind=None) for s in loop_vars),
test=node.test,
test_name=self.ctx.namer.new_symbol('loop_test', reserved),
undefined_assigns=undefined_assigns)
origin_info.copy_origin(node, new_nodes[-1])
return new_nodes
def visit_For(self, node):
node = self.generic_visit(node)
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
iter_scope = anno.getanno(node, annos.NodeAnno.ITERATE_SCOPE)
loop_vars, undefined, _ = self._get_block_vars(
node, body_scope.bound | iter_scope.bound)
undefined_assigns = self._create_undefined_assigns(undefined)
nonlocal_declarations = self._create_nonlocal_declarations(loop_vars)
reserved = body_scope.referenced | iter_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
state_functions = self._create_state_functions(
loop_vars, nonlocal_declarations, state_getter_name, state_setter_name)
opts = self._create_loop_options(node)
opts.keys.append(gast.Constant('iterate_names', kind=None))
opts.values.append(gast.Constant(
parser.unparse(node.target, include_encoding_marker=False), kind=None))
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST)
extra_test_name = self.ctx.namer.new_symbol(
'extra_test', reserved)
template = """
def extra_test_name():
nonlocal_declarations
return extra_test_expr
"""
extra_test_function = templates.replace(
template,
extra_test_expr=extra_test,
extra_test_name=extra_test_name,
loop_vars=loop_vars,
nonlocal_declarations=nonlocal_declarations)
else:
extra_test_name = parser.parse_expression('None')
extra_test_function = []
# iterate_arg_name holds a single arg with the iterates, which may be a
# tuple.
iterate_arg_name = self.ctx.namer.new_symbol('itr', reserved)
template = """
iterates = iterate_arg_name
"""
iterate_expansion = templates.replace(
template, iterate_arg_name=iterate_arg_name, iterates=node.target)
origin_info.copy_origin(node, iterate_expansion)
template = """
state_functions
def body_name(iterate_arg_name):
nonlocal_declarations
iterate_expansion
body
extra_test_function
undefined_assigns
ag__.for_stmt(
iterated,
extra_test_name,
body_name,
state_getter_name,
state_setter_name,
(symbol_names,),
opts)
"""
new_nodes = templates.replace(
template,
body=node.body,
body_name=self.ctx.namer.new_symbol('loop_body', reserved),
extra_test_function=extra_test_function,
extra_test_name=extra_test_name,
iterate_arg_name=iterate_arg_name,
iterate_expansion=iterate_expansion,
iterated=node.iter,
nonlocal_declarations=nonlocal_declarations,
opts=opts,
symbol_names=tuple(gast.Constant(str(s), kind=None) for s in loop_vars),
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
undefined_assigns=undefined_assigns)
origin_info.copy_origin(node, new_nodes[-1])
return new_nodes
| ControlFlowTransformer |
python | faif__python-patterns | patterns/structural/flyweight_with_metaclass.py | {
"start": 1117,
"end": 1718
} | class ____(metaclass=FlyweightMeta):
def __init__(self, *args, **kwargs):
# print('Init {}: {}'.format(self.__class__, (args, kwargs)))
pass
if __name__ == "__main__":
instances_pool = getattr(Card2, "pool")
cm1 = Card2("10", "h", a=1)
cm2 = Card2("10", "h", a=1)
cm3 = Card2("10", "h", a=2)
assert (cm1 == cm2) and (cm1 != cm3)
assert (cm1 is cm2) and (cm1 is not cm3)
assert len(instances_pool) == 2
del cm1
assert len(instances_pool) == 2
del cm2
assert len(instances_pool) == 1
del cm3
assert len(instances_pool) == 0
| Card2 |
python | conda__conda | conda/testing/notices/helpers.py | {
"start": 2678,
"end": 3389
} | class ____:
"""Dummy object that sets all kwargs as object properties."""
def __init__(self, **kwargs):
self.no_ansi_colors = True
for key, val in kwargs.items():
setattr(self, key, val)
def notices_decorator_assert_message_in_stdout(
captured,
messages: Sequence[str],
dummy_mesg: str | None = None,
not_in: bool = False,
):
"""
Tests a run of notices decorator where we expect to see the messages
print to stdout.
"""
assert captured.err == ""
assert dummy_mesg in captured.out
for mesg in messages:
if not_in:
assert mesg not in captured.out
else:
assert mesg in captured.out
| DummyArgs |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 1135,
"end": 1693
} | class ____(object):
NONE = 0
M = 1
TS = 2
M2 = 3
def AnyUniqueAliasesCreator(unionType, table):
from flatbuffers.table import Table
if not isinstance(table, Table):
return None
if unionType == AnyUniqueAliases.M:
return MonsterT.InitFromBuf(table.Bytes, table.Pos)
if unionType == AnyUniqueAliases.TS:
return TestSimpleTableWithEnumT.InitFromBuf(table.Bytes, table.Pos)
if unionType == AnyUniqueAliases.M2:
return MonsterT.InitFromBuf(table.Bytes, table.Pos)
return None
| AnyUniqueAliases |
python | RaRe-Technologies__gensim | gensim/models/lsi_dispatcher.py | {
"start": 2245,
"end": 9903
} | class ____:
"""Dispatcher object that communicates and coordinates individual workers.
Warnings
--------
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=0):
"""Partly initialize the dispatcher.
A full initialization (including initialization of the workers) requires a call to
:meth:`~gensim.models.lsi_dispatcher.Dispatcher.initialize`
Parameters
----------
maxsize : int, optional
Maximum number of jobs to be kept pre-fetched in the queue.
"""
self.maxsize = maxsize
self.workers = {}
self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
@Pyro4.expose
def initialize(self, **model_params):
"""Fully initialize the dispatcher and all its workers.
Parameters
----------
**model_params
Keyword parameters used to initialize individual workers
(gets handed all the way down to :meth:`gensim.models.lsi_worker.Worker.initialize`).
See :class:`~gensim.models.lsimodel.LsiModel`.
Raises
------
RuntimeError
When no workers are found (the :mod:`gensim.model.lsi_worker` script must be ran beforehand).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
# locate all available workers and store their proxies, for subsequent RMI calls
self.workers = {}
with utils.getNS() as ns:
self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self
for name, uri in ns.list(prefix='gensim.lsi_worker').items():
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
logger.info("registering worker #%i from %s", workerid, uri)
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.exception("unresponsive worker at %s, deleting it from the name server", uri)
ns.remove(name)
if not self.workers:
raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')
@Pyro4.expose
def getworkers(self):
"""Get pyro URIs of all registered workers.
Returns
-------
list of URIs
The pyro URIs for each worker.
"""
return [worker._pyroUri for worker in self.workers.values()]
@Pyro4.expose
def getjob(self, worker_id):
"""Atomically pop a job from the queue.
Parameters
----------
worker_id : int
The worker that requested the job.
Returns
-------
iterable of iterable of (int, float)
The corpus in BoW format.
"""
logger.info("worker #%i requesting a new job", worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
return job
@Pyro4.expose
def putjob(self, job):
"""Atomically add a job to the queue.
Parameters
----------
job : iterable of list of (int, float)
The corpus in BoW format.
"""
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())
@Pyro4.expose
def getstate(self):
"""Merge projections from across all workers and get the final projection.
Returns
-------
:class:`~gensim.models.lsimodel.Projection`
The current projection of the total model.
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
# TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
# and not `workers - 1` merges!
# but merging only takes place once, after all input data has been processed,
# so the overall effect would be small... compared to the amount of coding :-)
logger.info("merging states from %i workers", len(self.workers))
workers = list(self.workers.items())
result = workers[0][1].getstate()
for workerid, worker in workers[1:]:
logger.info("pulling state from worker %s", workerid)
result.merge(worker.getstate())
logger.info("sending out merged projection")
return result
@Pyro4.expose
def reset(self):
"""Re-initialize all workers for a new decomposition."""
for workerid, worker in self.workers.items():
logger.info("resetting worker %s", workerid)
worker.reset()
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@Pyro4.expose
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
"""A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.
Callback used by workers to notify when their job is done.
The job done event is logged and then control is asynchronously transfered back to the worker
(who can then request another job). In this way, control flow basically oscillates between
:meth:`gensim.models.lsi_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lsi_worker.Worker.requestjob`.
Parameters
----------
workerid : int
The ID of the worker that finished the job (used for logging).
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
worker = self.workers[workerid]
worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap :attr:`~gensim.models.lsi_dispatcher.Dispatcher._jobsdone`, needed for remote access through proxies.
Returns
-------
int
Number of jobs already completed.
"""
return self._jobsdone
@Pyro4.oneway
def exit(self):
"""Terminate all registered workers and then the dispatcher."""
for workerid, worker in self.workers.items():
logger.info("terminating worker %s", workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'maxsize',
nargs='?',
type=int,
help='Maximum number of jobs to be kept pre-fetched in the queue.',
default=MAX_JOBS_QUEUE,
)
args = parser.parse_args()
logger.info("running %s", " ".join(sys.argv))
utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=args.maxsize))
logger.info("finished running %s", parser.prog)
| Dispatcher |
python | pytorch__pytorch | torch/_dynamo/graph_region_tracker.py | {
"start": 6463,
"end": 13888
} | class ____:
"""
GraphRegionTracker tracks each node added to the output graph and generates a key based on the source location,
instruction pointer, input shapes, and global state at the time the node is inserted into the graph. Nodes with
the same key are grouped together in a list of identical nodes (the value of node_to_duplicates).
hash_to_duplicates: Dict[str, IdenticalNodes] - A dictionary mapping the key to a list of identical nodes
node_to_duplicates: Dict[Node, IdenticalNodes] - A dictionary mapping a node to the list of identical nodes it belongs to
input_pickler: InputPickler - An instance of InputPickler used to generate a node hash
"""
def __init__(self) -> None:
self.hash_to_duplicates: dict[str, IdenticalNodes] = defaultdict(list)
self.node_to_duplicates: dict[Node, IdenticalNodes] = {}
# Note: position is in flattened args/kwargs list
self.node_to_mutated_arg_positions: dict[Node, OrderedSet[int]] = {}
self.input_pickler = InputPickler()
def _hash_node(
self, filename: str, lineno: int, instruction_pointer: Optional[int], node: Node
) -> str:
from torch._inductor.codecache import sha256_hash
key = (
get_global_state_key(),
filename,
lineno,
instruction_pointer,
_normalize_args(node),
)
return sha256_hash(self.input_pickler.dumps(key))
def _is_identical(self, n0: Node, n1: Node) -> bool:
return (
n0 in self.node_to_duplicates
and n1 in self.node_to_duplicates
and self.node_to_duplicates[n0] is self.node_to_duplicates[n1]
and n0 is not n1
)
def track_node(self, tx: InstructionTranslatorBase, node: Node) -> None:
"""
The main entry point for tracking a node. This function will hash the node argument and group
nodes with the same hash together. It updates the hash_to_duplicates and node_to_duplicates dictionaries
to track the new node.
"""
try:
if (
node not in self.node_to_duplicates
): # don't allow nodes to be added twice
duplicates = self.hash_to_duplicates[
self._hash_node(
tx.f_code.co_filename, tx.lineno, tx.instruction_pointer, node
)
]
duplicates.append(node)
self.node_to_duplicates[node] = duplicates
except NodeHashException as e:
log.debug("Unable to hash node %s with exception %s", node, e) # noqa: G200
def track_node_mutations(
self,
node: Node,
flat_args_kwargs: list[Any],
id_to_initial_version: dict[int, int],
) -> None:
"""
This function tracks which argument positions are mutated by the given node. Subgraph HOP does not support
input mutations today so we will skip regions which have inputs that are mutated.
"""
mutated_arg_positions = OrderedSet[int]()
for i, arg in enumerate(flat_args_kwargs):
val_id = id(arg)
if (
val_id in id_to_initial_version
and id_to_initial_version[val_id] != arg._version
):
mutated_arg_positions.add(i)
if mutated_arg_positions:
self.node_to_mutated_arg_positions[node] = mutated_arg_positions
def add_node_mutation(
self,
node: Node,
arg_pos: int,
) -> None:
if node in self.node_to_mutated_arg_positions:
self.node_to_mutated_arg_positions[node].add(arg_pos)
else:
self.node_to_mutated_arg_positions[node] = OrderedSet([arg_pos])
def get_identical_regions(self, graph: torch.fx.Graph) -> list[list[Region]]:
"""
This function is responsible for extracting the largest regions of identical nodes from the given graph.
**Note**: This function assumes the nodes that have been tracked with track_node are in the provided graph argument.
The algorithm proceeds as follows:
The nodes tracked via track_node above are organized into region groups. The initial region groups look like this:
[[IdenticalNode1], [IdenticalNode2], [IdenticalNode3]] and each sublist is called a region. For each region group
(starting at the topologically latest region group), the inner regions are gradually expanded one node at time from
the flattened args and kwargs of the node in each region provided that for all regions in the group, the nodes being
added are also identical (ie have the same key computed by track_node). This is checked by verifying that the two
nodes have the same identical node list in node_to_duplicates.
"""
topological_ranking = {node: i for i, node in enumerate(graph.nodes)}
region_groups_with_rank = []
# needed to detect if replacing a region will create cycles
node_to_recursive_ancestors = _populate_recursive_ancestor_map(graph)
# Create region groups; a region group is a group
# of regions that are all identical. In this initial state
# each region in the group is a single node, and we discard
# groups that are only a single region.
# We track the topological ranking to start with groups later in the graph
# the reason for this is that we will necessarily create the largest groups first.
for group in self.hash_to_duplicates.values():
if len(group) > 1:
region_group = []
min_rank = math.inf
# pyrefly: ignore [bad-assignment]
for node in group:
# some nodes aren't in the topo ranking?
if node in topological_ranking:
min_rank = min(min_rank, topological_ranking[node])
region_group.append([node])
if len(region_group) > 1:
region_groups_with_rank.append((region_group, min_rank))
region_groups_with_rank.sort(key=lambda rg: -rg[1])
region_groups = [rg for rg, _ in region_groups_with_rank]
# We start from regions later in the graph and expand them earlier
# as a result, we will create the largest regions first and they won't
# overlap.
seen_nodes: set[Node] = set()
for region_group in region_groups:
fully_expand_region_group(
region_group,
seen_nodes,
node_to_recursive_ancestors,
self._is_identical,
)
# sort topologically
# we need to handle edge cases where some nodes have no dependencies
# so first we map each node to its ranking,
ref_region = region_group[0]
index_to_rank = {
index: topological_ranking[n] for index, n in enumerate(ref_region)
}
_sort_with_ref_region(index_to_rank, region_group)
return [
region_group for region_group in region_groups if len(region_group[0]) > 1
]
def __str__(self) -> str:
return f"GraphRegionTracker(hash_to_duplicates={self.hash_to_duplicates}, node_to_duplicates={self.node_to_duplicates})"
| GraphRegionTracker |
python | google__jax | jax/_src/interpreters/ad.py | {
"start": 25875,
"end": 27444
} | class ____(GradAccum):
aval: core.AbstractValue
val: Array | Zero
def __init__(self, aval, val=None):
self.aval = aval
self.val = Zero(aval) if val is None else val
def accum(self, x):
if x is not None:
self.val = add_tangents(self.val, x)
def freeze(self):
return self.val
# class NullAccum(GradAccum):
# aval: core.AbstractValue
# def __init__(self, aval): self.aval = aval
# def accum(self, x): return
# def freeze(self): assert False
fancy_transposes: dict[core.Primitive, Callable] = {}
def project_accums(args):
result, specs = [], []
for x in args:
if isinstance(x, ValAccum):
specs.append((ValAccum, x.aval))
elif isinstance(x, RefAccum):
specs.append((RefAccum, x.aval))
result.append(x.inst().ref)
else:
specs.append((None, typeof(x)))
result.append(x)
return result, tuple(specs)
def unproject_accums(specs, result):
args, result_ = [], iter(result)
for k, aval in specs:
if k is ValAccum:
args.append(ValAccum(aval))
elif k is RefAccum:
args.append(RefAccum(aval, next(result_)))
elif k is None:
args.append(next(result_))
else:
assert False
assert next(result_, None) is None
return args
def accum_typeof(x):
if isinstance(x, GradAccum):
return x.aval
else:
return core.typeof(x)
@lu.transformation_with_aux2
def nonzero_tangent_outputs(f, store, *args, **kwargs):
results = (_, tangents_out) = f(*args, **kwargs)
store.store([type(r) is not Zero for r in tangents_out])
return results
| ValAccum |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 22670,
"end": 23963
} | class ____(PreTrainedModel):
config: TvpConfig
base_model_prefix = "model"
input_modalities = ("video", "text")
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Conv2d):
init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
if module.bias is not None:
init.constant_(module.bias, 0)
elif isinstance(module, TvpModel):
init.normal_(module.text_prompt)
if isinstance(module, nn.Linear) and module.bias is not None:
init.zeros_(module.bias)
if hasattr(module, "pad_up"):
init.normal_(module.pad_up)
if hasattr(module, "pad_down"):
init.normal_(module.pad_down)
if hasattr(module, "pad_left"):
init.normal_(module.pad_left)
if hasattr(module, "pad_right"):
init.normal_(module.pad_right)
| TvpPreTrainedModel |
python | openai__openai-python | tests/test_transform.py | {
"start": 958,
"end": 1247
} | class ____(TypedDict):
foo_bar: Annotated[str, PropertyInfo(alias="fooBar")]
@parametrize
@pytest.mark.asyncio
async def test_top_level_alias(use_async: bool) -> None:
assert await transform({"foo_bar": "hello"}, expected_type=Foo1, use_async=use_async) == {"fooBar": "hello"}
| Foo1 |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/container_registry.py | {
"start": 1295,
"end": 4926
} | class ____(BaseHook):
"""
A hook to communicate with a Azure Container Registry.
:param conn_id: :ref:`Azure Container Registry connection id<howto/connection:acr>`
of a service principal which will be used to start the container instance
"""
conn_name_attr = "azure_container_registry_conn_id"
default_conn_name = "azure_container_registry_default"
conn_type = "azure_container_registry"
hook_name = "Azure Container Registry"
@classmethod
@add_managed_identity_connection_widgets
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"subscription_id": StringField(
lazy_gettext("Subscription ID (optional)"),
widget=BS3TextFieldWidget(),
),
"resource_group": StringField(
lazy_gettext("Resource group name (optional)"),
widget=BS3TextFieldWidget(),
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "extra"],
"relabeling": {
"login": "Registry Username",
"password": "Registry Password",
"host": "Registry Server",
},
"placeholders": {
"login": "private registry username",
"password": "private registry password",
"host": "docker image registry server",
"subscription_id": "Subscription id (required for Azure AD authentication)",
"resource_group": "Resource group name (required for Azure AD authentication)",
},
}
def __init__(self, conn_id: str = "azure_registry") -> None:
super().__init__()
self.conn_id = conn_id
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
@cached_property
def connection(self) -> ImageRegistryCredential:
return self.get_conn()
def get_conn(self) -> ImageRegistryCredential:
conn = self.get_connection(self.conn_id)
password = conn.password
if not password:
extras = conn.extra_dejson
subscription_id = self._get_field(extras, "subscription_id")
resource_group = self._get_field(extras, "resource_group")
managed_identity_client_id = self._get_field(extras, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extras, "workload_identity_tenant_id")
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
client = ContainerRegistryManagementClient(
credential=credential,
subscription_id=subscription_id,
)
credentials = client.registries.list_credentials(resource_group, conn.login).as_dict()
password = credentials["passwords"][0]["value"]
return ImageRegistryCredential(
server=cast("str", conn.host), username=cast("str", conn.login), password=password
)
| AzureContainerRegistryHook |
python | huggingface__transformers | templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py | {
"start": 1782,
"end": 3077
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
{%- elif cookiecutter.can_train_from_scratch == "False" %}
@dataclass
| ModelArguments |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/sql/utils.py | {
"start": 2714,
"end": 5161
} | class ____:
"""sqlparse post-processor to convert a Statement into a string escaped for
inclusion in HTML ."""
@staticmethod
def process(stmt):
return "".join(escaped_value(token) for token in stmt.flatten())
def is_select_query(sql):
# UNION queries can start with "(".
return sql.lower().lstrip(" (").startswith("select")
def reformat_sql(sql, *, with_toggle=False):
formatted = parse_sql(sql)
if not with_toggle:
return formatted
simplified = parse_sql(sql, simplify=True)
uncollapsed = f'<span class="djDebugUncollapsed">{simplified}</span>'
collapsed = f'<span class="djDebugCollapsed djdt-hidden">{formatted}</span>'
return collapsed + uncollapsed
@lru_cache(maxsize=128)
def parse_sql(sql, *, simplify=False):
stack = get_filter_stack(simplify=simplify)
return "".join(stack.run(sql))
@cache
def get_filter_stack(*, simplify):
stack = sqlparse.engine.FilterStack()
if simplify:
stack.preprocess.append(ElideSelectListsFilter())
else:
if dt_settings.get_config()["PRETTIFY_SQL"]:
stack.enable_grouping()
stack.stmtprocess.append(
sqlparse.filters.AlignedIndentFilter(char=" ", n="<br/>")
)
stack.stmtprocess.append(BoldKeywordFilter())
stack.postprocess.append(EscapedStringSerializer()) # Statement -> str
return stack
@receiver(setting_changed)
def clear_caches(*, setting, **kwargs):
if setting == "DEBUG_TOOLBAR_CONFIG":
parse_sql.cache_clear()
get_filter_stack.cache_clear()
def contrasting_color_generator():
"""
Generate contrasting colors by varying most significant bit of RGB first,
and then vary subsequent bits systematically.
"""
def rgb_to_hex(rgb):
return "#{:02x}{:02x}{:02x}".format(*tuple(rgb))
triples = [
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(1, 1, 0),
(0, 1, 1),
(1, 0, 1),
(1, 1, 1),
]
n = 1 << 7
so_far = [[0, 0, 0]]
while True:
if n == 0: # This happens after 2**24 colours; presumably, never
yield "#000000" # black
copy_so_far = list(so_far)
for triple in triples:
for previous in copy_so_far:
rgb = [n * triple[i] + previous[i] for i in range(3)]
so_far.append(rgb)
yield rgb_to_hex(rgb)
n >>= 1
| EscapedStringSerializer |
python | ansible__ansible | lib/ansible/modules/group.py | {
"start": 10654,
"end": 12267
} | class ____(Group):
"""
This is a AIX Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'AIX'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('rmgroup', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('mkgroup', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('id=' + str(kwargs[key]))
elif key == 'system' and kwargs[key] is True:
cmd.append('-a')
if self.gid_min is not None:
cmd.append('-K')
cmd.append('GID_MIN=' + str(self.gid_min))
if self.gid_max is not None:
cmd.append('-K')
cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('chgroup', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('id=' + str(kwargs[key]))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
| AIX |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 1986,
"end": 2020
} | class ____[**P = [int]]: ...
| ClassP1 |
python | tensorflow__tensorflow | tensorflow/python/training/server_lib.py | {
"start": 9340,
"end": 18031
} | class ____:
"""Represents a cluster as a set of "tasks", organized into "jobs".
A `tf.train.ClusterSpec` represents the set of processes that
participate in a distributed TensorFlow computation. Every
`tf.distribute.Server` is constructed in a particular cluster.
To create a cluster with two jobs and five tasks, you specify the
mapping from job names to lists of network addresses (typically
hostname-port pairs).
```python
cluster = tf.train.ClusterSpec({"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"],
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]})
```
Each job may also be specified as a sparse mapping from task indices
to network addresses. This enables a server to be configured without
needing to know the identity of (for example) all other worker
tasks:
```python
cluster = tf.train.ClusterSpec({"worker": {1: "worker1.example.com:2222"},
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]})
```
"""
def __init__(self, cluster):
"""Creates a `ClusterSpec`.
Args:
cluster: A dictionary mapping one or more job names to (i) a list of
network addresses, or (ii) a dictionary mapping integer task indices to
network addresses; or a `tf.train.ClusterDef` protocol buffer.
Raises:
TypeError: If `cluster` is not a dictionary mapping strings to lists
of strings, and not a `tf.train.ClusterDef` protobuf.
"""
if isinstance(cluster, dict):
self._cluster_spec = {}
for job_name, tasks in cluster.items():
if isinstance(tasks, (list, tuple)):
job_tasks = {i: task for i, task in enumerate(tasks)}
elif isinstance(tasks, dict):
job_tasks = {int(i): task for i, task in tasks.items()}
else:
raise TypeError("The tasks for job %r must be a list or a dictionary "
"from integers to strings." % job_name)
self._cluster_spec[job_name] = job_tasks
self._make_cluster_def()
elif isinstance(cluster, cluster_pb2.ClusterDef):
self._cluster_def = cluster
self._cluster_spec = {}
for job_def in self._cluster_def.job:
self._cluster_spec[job_def.name] = {
i: t for i, t in job_def.tasks.items()
}
elif isinstance(cluster, ClusterSpec):
self._cluster_def = cluster_pb2.ClusterDef()
self._cluster_def.MergeFrom(cluster.as_cluster_def())
self._cluster_spec = {}
for job_def in self._cluster_def.job:
self._cluster_spec[job_def.name] = {
i: t for i, t in job_def.tasks.items()
}
else:
raise TypeError("`cluster` must be a dictionary mapping one or more "
"job names to lists of network addresses, or a "
"`ClusterDef` protocol buffer")
def __bool__(self):
return bool(self._cluster_spec)
# Python 2.x
__nonzero__ = __bool__
def __eq__(self, other):
return self._cluster_spec == other
def __ne__(self, other):
return self._cluster_spec != other
def __repr__(self):
key_values = self.as_dict()
string_items = [
repr(k) + ": " + repr(key_values[k]) for k in sorted(key_values)
]
return "ClusterSpec({" + ", ".join(string_items) + "})"
def as_dict(self):
"""Returns a dictionary from job names to their tasks.
For each job, if the task index space is dense, the corresponding
value will be a list of network addresses; otherwise it will be a
dictionary mapping (sparse) task indices to the corresponding
addresses.
Returns:
A dictionary mapping job names to lists or dictionaries
describing the tasks in those jobs.
"""
ret = {}
for job in self.jobs:
task_indices = self.task_indices(job)
if len(task_indices) == 0:
ret[job] = {}
continue
if max(task_indices) + 1 == len(task_indices):
# Return a list because the task indices are dense. This
# matches the behavior of `as_dict()` before support for
# sparse jobs was added.
ret[job] = self.job_tasks(job)
else:
ret[job] = {i: self.task_address(job, i) for i in task_indices}
return ret
def as_cluster_def(self):
"""Returns a `tf.train.ClusterDef` protocol buffer based on this cluster."""
return self._cluster_def
@property
def jobs(self):
"""Returns a list of job names in this cluster.
Returns:
A list of strings, corresponding to the names of jobs in this cluster.
"""
return list(self._cluster_spec.keys())
def num_tasks(self, job_name):
"""Returns the number of tasks defined in the given job.
Args:
job_name: The string name of a job in this cluster.
Returns:
The number of tasks defined in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
return len(job)
def task_indices(self, job_name):
"""Returns a list of valid task indices in the given job.
Args:
job_name: The string name of a job in this cluster.
Returns:
A list of valid task indices in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster,
or no task with index `task_index` is defined in that job.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
return list(sorted(job.keys()))
def task_address(self, job_name, task_index):
"""Returns the address of the given task in the given job.
Args:
job_name: The string name of a job in this cluster.
task_index: A non-negative integer.
Returns:
The address of the given task in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster,
or no task with index `task_index` is defined in that job.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
try:
return job[task_index]
except KeyError:
raise ValueError("No task with index %r in job %r" %
(task_index, job_name))
def job_tasks(self, job_name):
"""Returns a mapping from task ID to address in the given job.
NOTE: For backwards compatibility, this method returns a list. If
the given job was defined with a sparse set of task indices, the
length of this list may not reflect the number of tasks defined in
this job. Use the `tf.train.ClusterSpec.num_tasks` method
to find the number of tasks defined in a particular job.
Args:
job_name: The string name of a job in this cluster.
Returns:
A list of task addresses, where the index in the list
corresponds to the task index of each task. The list may contain
`None` if the job was defined with a sparse set of task indices.
Raises:
ValueError: If `job_name` does not name a job in this cluster.
"""
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError("No such job in cluster: %r" % job_name)
ret = [None for _ in range(max(job.keys()) + 1)]
for i, task in job.items():
ret[i] = task
return ret
def _make_cluster_def(self):
"""Creates a `tf.train.ClusterDef` based on the given `cluster_spec`.
Raises:
TypeError: If `cluster_spec` is not a dictionary mapping strings to lists
of strings.
"""
self._cluster_def = cluster_pb2.ClusterDef()
# NOTE(mrry): Sort by job_name to produce deterministic protobufs.
for job_name, tasks in sorted(self._cluster_spec.items()):
try:
job_name = compat.as_bytes(job_name)
except TypeError:
raise TypeError("Job name %r must be bytes or unicode" % job_name)
job_def = self._cluster_def.job.add()
job_def.name = job_name
for i, task_address in sorted(tasks.items()):
try:
task_address = compat.as_bytes(task_address)
except TypeError:
raise TypeError("Task address %r must be bytes or unicode" %
task_address)
job_def.tasks[i] = task_address
@tf_export("config.experimental.ClusterDeviceFilters")
| ClusterSpec |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 33221,
"end": 33883
} | class ____(TestBasicOps, TestCase):
def setUp(self):
super().setUp()
warnings.simplefilter("ignore", BytesWarning)
self.case = "string and bytes OrderedSet"
self.values = ["a", "b", b"a", b"b"]
self.OrderedSet = OrderedSet(self.values)
self.dup = OrderedSet(self.values)
self.length = 4
@unittest.skip("Different repr")
def test_repr(self):
self.check_repr_against_values()
del TestBasicOps
# ==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
| TestBasicOpsMixedStringBytes |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/integrations.py | {
"start": 32847,
"end": 34921
} | class ____(APIView):
"""
Main webhook view for webhooks with an ID.
The handling of each view is handed off to another view. This should only
ever get webhook requests for established webhooks on our side. The other
views can receive webhooks for unknown webhooks, as all legacy webhooks will
be.
.. warning::
We're turning off Authentication for this view.
This fixes a bug where we were double-authenticating these views,
because of the way we're passing the request along to the subviews.
If at any time we add real logic to this view,
it will be completely unauthenticated.
"""
authentication_classes = []
VIEW_MAP = {
Integration.GITHUB_WEBHOOK: GitHubWebhookView,
Integration.GITLAB_WEBHOOK: GitLabWebhookView,
Integration.BITBUCKET_WEBHOOK: BitbucketWebhookView,
Integration.API_WEBHOOK: APIWebhookView,
}
def post(self, request, project_slug, integration_pk):
"""Set up webhook post view with request and project objects."""
# WARNING: this is a hack to allow us access to `request.body` later.
# Due to a limitation of DRF, we can't access `request.body`
# after accessing `request.data`.
# By accessing `request.body` we are able to access `request.body` and
# `request.data` later without any problem (mostly black magic).
# See #4940 for more background.
request.body # noqa
integration = get_object_or_404(
Integration,
project__slug=project_slug,
pk=integration_pk,
)
view_cls = self.VIEW_MAP[integration.integration_type]
view = view_cls.as_view(integration=integration)
# DRF uses ``rest_framework.request.Request`` and Django expects
# ``django.http.HttpRequest``
# https://www.django-rest-framework.org/api-guide/requests/
# https://github.com/encode/django-rest-framework/pull/5771#issuecomment-362815342
return view(request._request, project_slug)
| WebhookView |
python | django__django | tests/max_lengths/models.py | {
"start": 214,
"end": 450
} | class ____(models.Model):
email = models.EmailField(max_length=250)
vcard = models.FileField(max_length=250)
homepage = models.URLField(max_length=250)
avatar = models.FilePathField(max_length=250)
| PersonWithCustomMaxLengths |
python | jmcnamara__XlsxWriter | xlsxwriter/test/table/test_write_table_column.py | {
"start": 293,
"end": 819
} | class ____(unittest.TestCase):
"""
Test the Table _write_table_column() method.
"""
def setUp(self):
self.fh = StringIO()
self.table = Table()
self.table._set_filehandle(self.fh)
def test_write_table_column(self):
"""Test the _write_table_column() method"""
self.table._write_table_column({"name": "Column1", "id": 1})
exp = """<tableColumn id="1" name="Column1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteTableColumn |
python | python__mypy | mypy/checkstrformat.py | {
"start": 4952,
"end": 10622
} | class ____:
def __init__(
self, match: Match[str], start_pos: int = -1, non_standard_format_spec: bool = False
) -> None:
self.whole_seq = match.group()
self.start_pos = start_pos
m_dict = match.groupdict()
self.key = m_dict.get("key")
# Replace unmatched optional groups with empty matches (for convenience).
self.conv_type = m_dict.get("type", "")
self.flags = m_dict.get("flags", "")
self.width = m_dict.get("width", "")
self.precision = m_dict.get("precision", "")
# Used only for str.format() calls (it may be custom for types with __format__()).
self.format_spec = m_dict.get("format_spec")
self.non_standard_format_spec = non_standard_format_spec
# Used only for str.format() calls.
self.conversion = m_dict.get("conversion")
# Full formatted expression (i.e. key plus following attributes and/or indexes).
# Used only for str.format() calls.
self.field = m_dict.get("field")
def has_key(self) -> bool:
return self.key is not None
def has_star(self) -> bool:
return self.width == "*" or self.precision == "*"
def parse_conversion_specifiers(format_str: str) -> list[ConversionSpecifier]:
"""Parse c-printf-style format string into list of conversion specifiers."""
specifiers: list[ConversionSpecifier] = []
for m in re.finditer(FORMAT_RE, format_str):
specifiers.append(ConversionSpecifier(m, start_pos=m.start()))
return specifiers
def parse_format_value(
format_value: str, ctx: Context, msg: MessageBuilder, nested: bool = False
) -> list[ConversionSpecifier] | None:
"""Parse format string into list of conversion specifiers.
The specifiers may be nested (two levels maximum), in this case they are ordered as
'{0:{1}}, {2:{3}{4}}'. Return None in case of an error.
"""
top_targets = find_non_escaped_targets(format_value, ctx, msg)
if top_targets is None:
return None
result: list[ConversionSpecifier] = []
for target, start_pos in top_targets:
match = FORMAT_RE_NEW.fullmatch(target)
if match:
conv_spec = ConversionSpecifier(match, start_pos=start_pos)
else:
custom_match = FORMAT_RE_NEW_CUSTOM.fullmatch(target)
if custom_match:
conv_spec = ConversionSpecifier(
custom_match, start_pos=start_pos, non_standard_format_spec=True
)
else:
msg.fail(
"Invalid conversion specifier in format string",
ctx,
code=codes.STRING_FORMATTING,
)
return None
if conv_spec.key and ("{" in conv_spec.key or "}" in conv_spec.key):
msg.fail("Conversion value must not contain { or }", ctx, code=codes.STRING_FORMATTING)
return None
result.append(conv_spec)
# Parse nested conversions that are allowed in format specifier.
if (
conv_spec.format_spec
and conv_spec.non_standard_format_spec
and ("{" in conv_spec.format_spec or "}" in conv_spec.format_spec)
):
if nested:
msg.fail(
"Formatting nesting must be at most two levels deep",
ctx,
code=codes.STRING_FORMATTING,
)
return None
sub_conv_specs = parse_format_value(conv_spec.format_spec, ctx, msg, nested=True)
if sub_conv_specs is None:
return None
result.extend(sub_conv_specs)
return result
def find_non_escaped_targets(
format_value: str, ctx: Context, msg: MessageBuilder
) -> list[tuple[str, int]] | None:
"""Return list of raw (un-parsed) format specifiers in format string.
Format specifiers don't include enclosing braces. We don't use regexp for
this because they don't work well with nested/repeated patterns
(both greedy and non-greedy), and these are heavily used internally for
representation of f-strings.
Return None in case of an error.
"""
result = []
next_spec = ""
pos = 0
nesting = 0
while pos < len(format_value):
c = format_value[pos]
if not nesting:
# Skip any paired '{{' and '}}', enter nesting on '{', report error on '}'.
if c == "{":
if pos < len(format_value) - 1 and format_value[pos + 1] == "{":
pos += 1
else:
nesting = 1
if c == "}":
if pos < len(format_value) - 1 and format_value[pos + 1] == "}":
pos += 1
else:
msg.fail(
"Invalid conversion specifier in format string: unexpected }",
ctx,
code=codes.STRING_FORMATTING,
)
return None
else:
# Adjust nesting level, then either continue adding chars or move on.
if c == "{":
nesting += 1
if c == "}":
nesting -= 1
if nesting:
next_spec += c
else:
result.append((next_spec, pos - len(next_spec)))
next_spec = ""
pos += 1
if nesting:
msg.fail(
"Invalid conversion specifier in format string: unmatched {",
ctx,
code=codes.STRING_FORMATTING,
)
return None
return result
| ConversionSpecifier |
python | bokeh__bokeh | src/bokeh/models/expressions.py | {
"start": 8561,
"end": 8845
} | class ____(Expression):
""" Base class for bi-variate expressions. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
transform = Instance(CoordinateTransform)
| XYComponent |
python | getsentry__sentry | src/sentry/feedback/usecases/title_generation.py | {
"start": 471,
"end": 3314
} | class ____(TypedDict):
"""Corresponds to GenerateFeedbackTitleRequest in Seer."""
organization_id: int
feedback_message: str
def truncate_feedback_title(title: str, max_words: int = 10) -> str:
"""
Truncate and format a title for user feedback issues.
Args:
title: The title to truncate
max_words: Maximum number of words to include from the title
Returns:
A truncated and formatted title string
"""
stripped_message = title.strip()
# Clean and split the message into words
words = stripped_message.split()
if len(words) <= max_words:
summary = stripped_message
else:
summary = " ".join(words[:max_words])
if len(summary) < len(stripped_message):
summary += "..."
# Truncate if necessary (keeping some buffer for external system limits)
if len(summary) > 185: # Conservative limit
summary = summary[:182] + "..."
return summary
@metrics.wraps("feedback.ai_title_generation")
def get_feedback_title_from_seer(feedback_message: str, organization_id: int) -> str | None:
"""
Generate an AI-powered title for user feedback using Seer, or None if generation fails.
Args:
feedback_message: The user's feedback message
organization_id: The ID of the organization
max_words: Maximum number of words to include from the generated title
Returns:
A title string or None if generation fails
"""
seer_request = GenerateFeedbackTitleRequest(
feedback_message=feedback_message,
organization_id=organization_id,
)
try:
response = make_signed_seer_api_request(
connection_pool=seer_summarization_connection_pool,
path=SEER_TITLE_GENERATION_ENDPOINT_PATH,
body=json.dumps(seer_request).encode("utf-8"),
timeout=SEER_TIMEOUT_S,
retries=SEER_RETRIES,
)
except Exception:
return None
if response.status < 200 or response.status >= 300:
logger.error(
"Seer title generation endpoint failed",
extra={"status_code": response.status, "response_data": response.data},
)
metrics.incr(
"feedback.ai_title_generation.error",
tags={"reason": "seer_response_failed"},
)
return None
try:
return response.json()["title"].strip() or None
except Exception:
return None
def get_feedback_title(feedback_message: str, organization_id: int, use_seer: bool) -> str:
if use_seer:
# Message is fallback if Seer fails.
raw_title = (
get_feedback_title_from_seer(feedback_message, organization_id) or feedback_message
)
else:
raw_title = feedback_message
return raw_title
| GenerateFeedbackTitleRequest |
python | bokeh__bokeh | tests/unit/bokeh/application/test_application.py | {
"start": 1950,
"end": 7462
} | class ____:
# Public methods ----------------------------------------------------------
def test_empty(self) -> None:
a = baa.Application()
doc = a.create_document()
assert not doc.roots
def test_invalid_kwarg(self) -> None:
with pytest.raises(TypeError):
baa.Application(junk="foo")
def test_process_request(self) -> None:
a = baa.Application()
a.add(RequestHandler(dict(a=10)))
a.add(RequestHandler(dict(b=20)))
a.add(RequestHandler(dict(a=30)))
assert a.process_request("request") == dict(a=30, b=20)
def test_one_handler(self) -> None:
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
handler = FunctionHandler(add_roots)
a.add(handler)
doc = a.create_document()
assert len(doc.roots) == 2
def test_two_handlers(self) -> None:
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
a.add(handler)
handler2 = FunctionHandler(add_one_root)
a.add(handler2)
doc = a.create_document()
assert len(doc.roots) == 3
def test_two_handlers_in_init(self) -> None:
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
handler2 = FunctionHandler(add_one_root)
a = baa.Application(handler, handler2)
doc = a.create_document()
assert len(doc.roots) == 3
def test_safe_to_fork(self) -> None:
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
handler2 = FunctionHandler(add_one_root)
a = baa.Application(handler, handler2)
assert a.safe_to_fork
a.create_document()
assert not a.safe_to_fork
def test_metadata(self) -> None:
a = baa.Application(metadata="foo")
a.create_document()
assert a.metadata == "foo"
def test_failed_handler(self, caplog: pytest.LogCaptureFixture) -> None:
a = baa.Application()
handler = CodeHandler(filename="junk", source="bad(")
a.add(handler)
d = Document()
with caplog.at_level(logging.ERROR):
assert len(caplog.records) == 0
a.initialize_document(d)
assert len(caplog.records) == 1
def test_no_static_path(self) -> None:
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
a.add(handler)
handler2 = FunctionHandler(add_one_root)
a.add(handler2)
assert a.static_path is None
def test_static_path(self) -> None:
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
handler._static = "foo"
a.add(handler)
handler2 = FunctionHandler(add_one_root)
a.add(handler2)
assert a.static_path == "foo"
def test_excess_static_path(self) -> None:
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
handler._static = "foo"
a.add(handler)
handler2 = FunctionHandler(add_one_root)
handler2._static = "bar"
with pytest.raises(RuntimeError) as e:
a.add(handler2)
assert "More than one static path" in str(e.value)
@patch('bokeh.document.document.check_integrity')
def test_application_validates_document_by_default(self, check_integrity: MagicMock) -> None:
a = baa.Application()
d = Document()
d.add_root(figure())
a.initialize_document(d)
assert check_integrity.called
@patch('bokeh.document.document.check_integrity')
def test_application_doesnt_validate_document_due_to_env_var(self, check_integrity: MagicMock, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
a = baa.Application()
d = Document()
d.add_root(figure())
a.initialize_document(d)
assert not check_integrity.called
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
| Test_Application |
python | ray-project__ray | python/ray/_private/profiling.py | {
"start": 1185,
"end": 2077
} | class ____:
# https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#heading=h.lpfof2aylapb # noqa
# The event categories. This is a comma separated list of categories
# for the event. The categories can be used to hide events in
# the Trace Viewer UI.
cat: str
# The string displayed on the event.
name: str
# The identifier for the group of rows that the event
# appears in.
pid: int
# The identifier for the row that the event appears in.
tid: int
# The start time in microseconds.
ts: int
# The duration in microseconds.
dur: int
# This is the name of the color to display the box in.
cname: str
# The extra user-defined data.
args: Dict[str, Union[str, int]]
# The event type (X means the complete event).
ph: str = "X"
@dataclass(init=True)
| ChromeTracingCompleteEvent |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/linear_prepack_fp16_test.py | {
"start": 589,
"end": 1358
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(
M, N, K, device=device, requires_grad=False, dtype=torch.float32
)
}
self.set_module_name("linear_prepack_fp16")
def forward(self, input_one):
return torch.ops.quantized.linear_prepack_fp16(input_one)
# The generated test names based on linear_prepack_fp16_short_configs will be in the following pattern:
# linear_prepack_fp16_M8_N16_K32_devicecpu
op_bench.generate_pt_test(
linear_prepack_fp16_long_configs + linear_prepack_fp16_short_configs,
LinearPrepackFP16Benchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| LinearPrepackFP16Benchmark |
python | streamlit__streamlit | lib/streamlit/elements/lib/built_in_chart_utils.py | {
"start": 1784,
"end": 2041
} | class ____(TypedDict):
"""Columns used for the prep_data step in Altair Arrow charts."""
x_column: str | None
y_column_list: list[str]
color_column: str | None
size_column: str | None
sort_column: str | None
@dataclass
| PrepDataColumns |
python | doocs__leetcode | solution/0400-0499/0414.Third Maximum Number/Solution.py | {
"start": 0,
"end": 396
} | class ____:
def thirdMax(self, nums: List[int]) -> int:
m1 = m2 = m3 = -inf
for num in nums:
if num in [m1, m2, m3]:
continue
if num > m1:
m3, m2, m1 = m2, m1, num
elif num > m2:
m3, m2 = m2, num
elif num > m3:
m3 = num
return m3 if m3 != -inf else m1
| Solution |
python | gevent__gevent | src/greentest/3.12/test_socket.py | {
"start": 24952,
"end": 78392
} | class ____(unittest.TestCase):
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_socket_type(self):
self.assertTrue(gc.is_tracked(_socket.socket))
with self.assertRaisesRegex(TypeError, "immutable"):
_socket.socket.foo = 1
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipIf(support.is_wasi, "WASI is missing these methods")
def test_socket_methods(self):
# socket methods that depend on a configure HAVE_ check. They should
# be present on all platforms except WASI.
names = [
"_accept", "bind", "connect", "connect_ex", "getpeername",
"getsockname", "listen", "recvfrom", "recvfrom_into", "sendto",
"setsockopt", "shutdown"
]
for name in names:
if not hasattr(socket.socket, name):
self.fail(f"socket method {name} is missing")
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS and AT&T, may successfully
# resolve these IPs. In particular, AT&T's DNS Error Assist service
# will break this test. See https://bugs.python.org/issue42092 for a
# workaround.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
    """Every if_nameindex() entry must round-trip through
    if_nametoindex()/if_indextoname()."""
    interfaces = socket.if_nameindex()
    for index, name in interfaces:
        self.assertIsInstance(index, int)
        self.assertIsInstance(name, str)
        # interface indices are non-zero integers
        self.assertGreater(index, 0)
        _index = socket.if_nametoindex(name)
        self.assertIsInstance(_index, int)
        self.assertEqual(index, _index)
        _name = socket.if_indextoname(index)
        self.assertIsInstance(_name, str)
        self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
                     'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
    """if_indextoname() must reject zero, negative, huge, and non-int indices."""
    self.assertRaises(OSError, socket.if_indextoname, 0)
    self.assertRaises(OverflowError, socket.if_indextoname, -1)
    self.assertRaises(OverflowError, socket.if_indextoname, 2**1000)
    self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
    if hasattr(socket, 'if_nameindex'):
        indices = dict(socket.if_nameindex())
        for index in indices:
            # An index shifted past 32 bits must not alias a real interface.
            index2 = index + 2**32
            if index2 not in indices:
                with self.assertRaises((OverflowError, OSError)):
                    socket.if_indextoname(index2)
        for index in 2**32-1, 2**64-1:
            if index not in indices:
                with self.assertRaises((OverflowError, OSError)):
                    socket.if_indextoname(index)
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
                     'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
    """if_nametoindex() must reject non-string and unknown interface names."""
    self.assertRaises(TypeError, socket.if_nametoindex, 0)
    self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
    """getnameinfo() must not leak a reference to its argument on TypeError."""
    # Testing reference count for getnameinfo
    try:
        # On some versions, this loses a reference
        orig = sys.getrefcount(__name__)
        socket.getnameinfo(__name__,0)
    except TypeError:
        if sys.getrefcount(__name__) != orig:
            self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
    """getnameinfo() with a bogus address tuple must fail, not crash."""
    # Making sure getnameinfo doesn't crash the interpreter
    try:
        # On some versions, this crashes the interpreter.
        socket.getnameinfo(('x', 0, 0, 0), 0)
    except OSError:
        pass
def testNtoH(self):
    """Verify htonl/ntohl and htons/ntohs are self-inverse on their low bits."""
    converters = [(socket.htonl, 32), (socket.ntohl, 32),
                  (socket.htons, 16), (socket.ntohs, 16)]
    samples = (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210)
    for convert, width in converters:
        mask = (1 << width) - 1
        for value in samples:
            truncated = value & mask
            # Applying the conversion twice must be the identity (mod 2**width).
            self.assertEqual(truncated, convert(convert(truncated)) & mask)
        # An all-ones pattern is byte-order symmetric.
        self.assertEqual(convert(mask) & mask, mask)
        # Values wider than the native long are rejected.
        self.assertRaises(OverflowError, convert, 1 << 34)
@support.cpython_only
def testNtoHErrors(self):
    """Byte-order converters must raise OverflowError for out-of-range values."""
    import _testcapi
    # Valid 16-bit values; 32-bit converters additionally accept 0xffffffff.
    s_good_values = [0, 1, 2, 0xffff]
    l_good_values = s_good_values + [0xffffffff]
    l_bad_values = [-1, -2, 1<<32, 1<<1000]
    # 16-bit converters also reject values that fit a C int but not 16 bits.
    s_bad_values = (
        l_bad_values +
        [_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
        [1 << 16, _testcapi.INT_MAX]
    )
    for k in s_good_values:
        socket.ntohs(k)
        socket.htons(k)
    for k in l_good_values:
        socket.ntohl(k)
        socket.htonl(k)
    for k in s_bad_values:
        self.assertRaises(OverflowError, socket.ntohs, k)
        self.assertRaises(OverflowError, socket.htons, k)
    for k in l_bad_values:
        self.assertRaises(OverflowError, socket.ntohl, k)
        self.assertRaises(OverflowError, socket.htonl, k)
def testGetServBy(self):
    """Round-trip getservbyname()/getservbyport() for a known service."""
    eq = self.assertEqual
    # Find one service that exists, then check all the related interfaces.
    # I've ordered this by protocols that have both a tcp and udp
    # protocol, at least for modern Linuxes.
    if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
        # avoid the 'echo' service on this platform, as there is an
        # assumption breaking non-standard port/protocol entry
        services = ('daytime', 'qotd', 'domain')
    else:
        services = ('echo', 'daytime', 'domain')
    for service in services:
        try:
            port = socket.getservbyname(service, 'tcp')
            break
        except OSError:
            pass
    else:
        # None of the candidate services exist in the services database.
        raise OSError
    # Try same call with optional protocol omitted
    # Issue #26936: Android getservbyname() was broken before API 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        port2 = socket.getservbyname(service)
        eq(port, port2)
    # Try udp, but don't barf if it doesn't exist
    try:
        udpport = socket.getservbyname(service, 'udp')
    except OSError:
        udpport = None
    else:
        eq(udpport, port)
    # Now make sure the lookup by port returns the same service name
    # Issue #26936: Android getservbyport() is broken.
    if not support.is_android:
        eq(socket.getservbyport(port2), service)
    eq(socket.getservbyport(port, 'tcp'), service)
    if udpport is not None:
        eq(socket.getservbyport(udpport, 'udp'), service)
    # Make sure getservbyport does not accept out of range ports.
    self.assertRaises(OverflowError, socket.getservbyport, -1)
    self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
    """setdefaulttimeout() must propagate to newly created sockets."""
    # Testing default timeout
    # The default timeout should initially be None
    self.assertEqual(socket.getdefaulttimeout(), None)
    with socket.socket() as s:
        self.assertEqual(s.gettimeout(), None)
    # Set the default timeout to 10, and see if it propagates
    with socket_setdefaulttimeout(10):
        self.assertEqual(socket.getdefaulttimeout(), 10)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), 10)
        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), None)
    # Check that setting it to an invalid value raises ValueError
    self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
    # Check that setting it to an invalid type raises TypeError
    self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
                     'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
    """inet_aton() must always produce exactly four bytes.

    Regression check for issue1008086 and issue767150.
    """
    for text, packed in (('0.0.0.0', b'\x00' * 4),
                         ('255.255.255.255', b'\xff' * 4)):
        self.assertEqual(packed, socket.inet_aton(text))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv4toString(self):
    """inet_aton() and inet_pton(AF_INET) must pack dotted-quad strings
    identically and reject malformed ones."""
    from socket import inet_aton as f, inet_pton, AF_INET
    g = lambda a: inet_pton(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
    self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
    self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')
    self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
    self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
    assertInvalid(g, '0.0.0.')
    assertInvalid(g, '300.0.0.0')
    assertInvalid(g, 'a.0.0.0')
    assertInvalid(g, '1.2.3.4.5')
    assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """inet_pton(AF_INET6) must pack valid IPv6 text forms (including
    embedded IPv4 suffixes) and reject malformed ones."""
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL: the Winsock stack lacks IPv6 support.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid('1:2:3:4:5:6:7:8:')

    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv4(self):
    """inet_ntoa() and inet_ntop(AF_INET) must format 4-byte values
    identically and reject wrong-sized buffers."""
    from socket import inet_ntoa as f, inet_ntop, AF_INET
    g = lambda a: inet_ntop(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
    self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
    assertInvalid(f, b'\x00' * 3)
    assertInvalid(f, b'\x00' * 5)
    assertInvalid(f, b'\x00' * 16)
    # bytes-like objects (bytearray) are accepted too.
    self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
    self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
    assertInvalid(g, b'\x00' * 3)
    assertInvalid(g, b'\x00' * 5)
    assertInvalid(g, b'\x00' * 16)
    self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """inet_ntop(AF_INET6) must format 16-byte values and reject other sizes."""
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL: the Winsock stack lacks IPv6 support.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    # bytes-like objects (bytearray) are accepted too.
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
    """getsockname() must report the bound wildcard address and port."""
    # Testing getsockname()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)

    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = socket_helper.find_unused_port()
        try:
            sock.bind(("0.0.0.0", port))
        except OSError as e:
            # Give up after five collisions with another process.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break

    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos. This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
    """A freshly created socket must report SO_REUSEADDR as disabled."""
    # Testing getsockopt()
    # We know a socket should start without reuse==0
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
    """setsockopt(SO_REUSEADDR, 1) must be reflected by getsockopt()."""
    # Testing setsockopt()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on a closed socket must raise OSError."""
    # testing send() after close() with timeout
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
    # The with-block has closed the socket; send must now fail.
    self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
    """close() must raise EBADF/ENOTSOCK after its fd was closed elsewhere."""
    sock = socket.socket()
    sock.bind((socket._LOCALHOST, 0))
    # Wrap the same fd in a second socket object and close it, leaving
    # `sock` holding a stale file descriptor.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """A new socket must expose correct .family, .type and .proto attributes."""
    # testing .family, .type and .protocol
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            # Some kernels report the close-on-exec flag in the type.
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
    """bind() must reject out-of-range ports and accept a valid one."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = socket_helper.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = socket_helper.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Give up after five collisions with another process.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows: ioctl() and its SIO_*/RCVALL_* constants must exist and work."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    # An unknown control code must be rejected before reaching the OS.
    self.assertRaises(ValueError, s.ioctl, -1, None)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """Windows: enable SIO_LOOPBACK_FAST_PATH via ioctl() and reject None.

    Skips when the Windows version defines the constant but the OS
    reports WSAEOPNOTSUPP for the option.
    """
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Fixed skip message (was the ungrammatical "doesn't implemented").
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "not implemented in this Windows version")
        raise
    # The option argument must be a bool-compatible value, not None.
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Exercise getaddrinfo() result shape, filters, and keyword arguments."""
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if socket_helper.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(repr(family), '<AddressFamily.AF_INET: %r>' % family.value)
        self.assertEqual(str(family), str(family.value))
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(repr(type), '<SocketKind.SOCK_STREAM: %r>' % type.value)
        self.assertEqual(str(type), str(type.value))
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail. All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getaddrinfo_int_port_overflow(self):
    """getaddrinfo() must never raise OverflowError for huge integer ports."""
    # gh-74895: Test that getaddrinfo does not raise OverflowError on port.
    #
    # POSIX getaddrinfo() never specify the valid range for "service"
    # decimal port number values. For IPv4 and IPv6 they are technically
    # unsigned 16-bit values, but the API is protocol agnostic. Which values
    # trigger an error from the C library function varies by platform as
    # they do not all perform validation.

    # The key here is that we don't want to produce OverflowError as Python
    # prior to 3.12 did for ints outside of a [LONG_MIN, LONG_MAX] range.
    # Leave the error up to the underlying string based platform C API.

    from _testcapi import ULONG_MAX, LONG_MAX, LONG_MIN
    try:
        socket.getaddrinfo(None, ULONG_MAX + 1, type=socket.SOCK_STREAM)
    except OverflowError:
        # Platforms differ as to what values constitute a getaddrinfo() error
        # return. Some fail for LONG_MAX+1, others ULONG_MAX+1, and Windows
        # silently accepts such huge "port" aka "service" numeric values.
        self.fail("Either no error or socket.gaierror expected.")
    except socket.gaierror:
        pass

    try:
        socket.getaddrinfo(None, LONG_MAX + 1, type=socket.SOCK_STREAM)
    except OverflowError:
        self.fail("Either no error or socket.gaierror expected.")
    except socket.gaierror:
        pass

    try:
        socket.getaddrinfo(None, LONG_MAX - 0xffff + 1, type=socket.SOCK_STREAM)
    except OverflowError:
        self.fail("Either no error or socket.gaierror expected.")
    except socket.gaierror:
        pass

    try:
        socket.getaddrinfo(None, LONG_MIN - 1, type=socket.SOCK_STREAM)
    except OverflowError:
        self.fail("Either no error or socket.gaierror expected.")
    except socket.gaierror:
        pass

    socket.getaddrinfo(None, 0, type=socket.SOCK_STREAM)  # No error expected.
    socket.getaddrinfo(None, 0xffff, type=socket.SOCK_STREAM)  # No error expected.
def test_getnameinfo(self):
    """getnameinfo() must reject host names; only IP addresses are allowed."""
    # only IP addresses are allowed
    self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
def test_idna(self):
    """Name lookups must accept internationalized (IDNA) domain names."""
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with socket_helper.transient_internet('python.org'):
        socket.gethostbyname('python.org')

    # these should all be successful
    domain = 'испытание.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Helper: verify sendall() behavior when interrupted by SIGALRM.

    With *with_timeout* false, an exception raised in the signal handler
    must propagate out of sendall(); with it true, a handler that returns
    normally must let sendall() time out instead.
    """
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(TimeoutError, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        # Always cancel the pending alarm and restore the old handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        c.close()
        s.close()
def test_sendall_interrupted(self):
    """Without a timeout, a raising signal handler must abort sendall()."""
    self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
    """With a timeout, an interrupted sendall() must raise TimeoutError."""
    self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
    """Garbage-collecting an open socket must emit a ResourceWarning."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    # The warning message should identify the leaked socket.
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
def test_name_closed_socketio(self):
    """repr() of a closed socket file object must show name=-1."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        fp = sock.makefile("rb")
        fp.close()
        self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
    """A closed raw socket file object must raise ValueError on capability checks."""
    with socket.socket() as sock:
        fp = sock.makefile("rb", buffering=0)
        self.assertTrue(fp.readable())
        self.assertFalse(fp.writable())
        self.assertFalse(fp.seekable())
        fp.close()
        # After close() the readable/writable/seekable queries must fail.
        self.assertRaises(ValueError, fp.readable)
        self.assertRaises(ValueError, fp.writable)
        self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
    """socket.close(fd) must close the descriptor and reject bad arguments."""
    sock = socket.socket()
    try:
        sock.bind((HOST, 0))
        socket.close(sock.fileno())
        # The underlying fd is gone, so further socket calls must fail.
        with self.assertRaises(OSError):
            sock.listen(1)
    finally:
        with self.assertRaises(OSError):
            # sock.close() fails with EBADF
            sock.close()
    with self.assertRaises(TypeError):
        socket.close(None)
    with self.assertRaises(OSError):
        socket.close(-1)
def test_makefile_mode(self):
    """makefile() must echo back every supported mode string via fp.mode."""
    for mode in ('r', 'rb', 'rw', 'w', 'wb'):
        # Text-mode files need an explicit encoding; binary ones take None.
        if "b" in mode:
            encoding = None
        else:
            encoding = "utf-8"
        with self.subTest(mode=mode):
            with socket.socket() as sock:
                with sock.makefile(mode, encoding=encoding) as fp:
                    self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
    """Unsupported makefile() mode strings must raise ValueError."""
    for bad_mode in ('rt', 'x', '+', 'a'):
        with self.subTest(mode=bad_mode):
            with socket.socket() as sock:
                with self.assertRaisesRegex(ValueError, 'invalid mode'):
                    sock.makefile(bad_mode)
def test_pickle(self):
    """Sockets must refuse pickling; AF_*/SOCK_* constants must round-trip."""
    sock = socket.socket()
    with sock:
        # Socket objects are unpicklable under every protocol version.
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises(TypeError, pickle.dumps, sock, protocol)
    # The IntEnum constants, however, must survive a round trip.
    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
        family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
        self.assertEqual(family, socket.AF_INET)
        kind = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
        self.assertEqual(kind, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() must accept zero, negative, and omitted backlog values."""
    # Explicit backlogs, including a negative one (clamped by the OS).
    for backlog in (0, -1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    # The backlog argument may also be omitted entirely.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    """listen() must raise OverflowError for a backlog beyond C int range."""
    # Issue 15989
    import _testcapi
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """IPv6 flowinfo values outside [0, 2**20-1] must raise OverflowError."""
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (socket_helper.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() must normalize IPv6 hex digits to lowercase."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
def test_getfqdn_filter_localhost(self):
    """getfqdn() on wildcard addresses must match the local machine's FQDN."""
    local_fqdn = socket.getfqdn()
    self.assertEqual(local_fqdn, socket.getfqdn("0.0.0.0"))
    self.assertEqual(local_fqdn, socket.getfqdn("::"))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """A symbolic %interface suffix must map to the numeric scope id."""
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """A numeric %index suffix must be passed through as the scope id."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Max OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() must render a numeric scope id as a %interface suffix."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() must render an unknown scope id as a numeric %N suffix."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """socket.family/.type must repr() like enums and str() like plain ints."""
    # Make sure that the AF_* and SOCK_* constants have enum-like string
    # reprs.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        self.assertEqual(repr(s.family), '<AddressFamily.AF_INET: %r>' % s.family.value)
        self.assertEqual(repr(s.type), '<SocketKind.SOCK_STREAM: %r>' % s.type.value)
        self.assertEqual(str(s.family), str(s.family.value))
        self.assertEqual(str(s.type), str(s.type.value))
def test_socket_consistent_sock_type(self):
    """socket.type must stay SOCK_STREAM regardless of blocking/timeout flags."""
    SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
    SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
    sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
    with socket.socket(socket.AF_INET, sock_type) as s:
        # The extra flags must be masked out of the reported type,
        # and mode changes must not leak them back in.
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.settimeout(1)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.settimeout(0)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.setblocking(True)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.setblocking(False)
        self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
    """Unknown family/type constants must be reported as plain numbers."""
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = sock.detach()
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1

    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1

    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() must give up or error on unusable fds."""
    # Minimal file-like object exposing only fileno().
    class File:
        def __init__(self, fd):
            self.fd = fd

        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        # A freshly closed fd: sendfile must fall back gracefully.
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: rebuilding a socket from s.fileno() must preserve family/type.

    Takes ownership of *s*'s file descriptor (detaches it) and leaves a
    replacement socket registered for cleanup.
    """
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)

    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
    """socket(fileno=...) must reconstruct sockets of every available family."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(s.close)
    s.bind((socket_helper.HOST, 0))
    self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(socket, "SOCK_DGRAM"):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind((socket_helper.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

    if socket_helper.IPV6_ENABLED:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((socket_helper.HOSTv6, 0, 0, 0))
        self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

    if hasattr(socket, "AF_UNIX"):
        unix_name = socket_helper.create_unix_domain_name()
        self.addCleanup(os_helper.unlink, unix_name)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        with s:
            try:
                s.bind(unix_name)
            except PermissionError:
                # Binding may be forbidden (e.g. sandboxed environments).
                pass
            else:
                self._test_socket_fileno(s, socket.AF_UNIX,
                                         socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
    """A float fileno must raise TypeError (no implicit truncation)."""
    with self.assertRaises(TypeError):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
    """A non-integer fileno must raise TypeError."""
    with self.assertRaises(TypeError):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
def test_addressfamily_enum(self):
import _socket, enum
CheckedAddressFamily = enum._old_convert_(
enum.IntEnum, 'AddressFamily', 'socket',
lambda C: C.isupper() and C.startswith('AF_'),
source=_socket,
)
enum._test_simple_enum(CheckedAddressFamily, socket.AddressFamily)
def test_socketkind_enum(self):
import _socket, enum
CheckedSocketKind = enum._old_convert_(
enum.IntEnum, 'SocketKind', 'socket',
lambda C: C.isupper() and C.startswith('SOCK_'),
source=_socket,
)
enum._test_simple_enum(CheckedSocketKind, socket.SocketKind)
def test_msgflag_enum(self):
import _socket, enum
CheckedMsgFlag = enum._old_convert_(
enum.IntFlag, 'MsgFlag', 'socket',
lambda C: C.isupper() and C.startswith('MSG_'),
source=_socket,
)
enum._test_simple_enum(CheckedMsgFlag, socket.MsgFlag)
def test_addressinfo_enum(self):
import _socket, enum
CheckedAddressInfo = enum._old_convert_(
enum.IntFlag, 'AddressInfo', 'socket',
lambda C: C.isupper() and C.startswith('AI_'),
source=_socket)
enum._test_simple_enum(CheckedAddressInfo, socket.AddressInfo)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
| GeneralModuleTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.