language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | miyuchina__mistletoe | test/test_line_numbers.py | {
"start": 212,
"end": 2201
} | class ____(unittest.TestCase):
def setUp(self) -> None:
block_token.add_token(block_token.HTMLBlock)
span_token.add_token(span_token.HTMLSpan)
block_token.remove_token(block_token.Footnote)
block_token.add_token(LinkReferenceDefinitionBlock)
return super().setUp()
def tearDown(self) -> None:
span_token.reset_tokens()
block_token.reset_tokens()
return super().tearDown()
def test_main(self):
# see line_numbers.md for a description of how the test works.
NUMBER_OF_LINE_NUMBERS_TO_BE_CHECKED = 13
with open("test/samples/line_numbers.md", "r") as fin:
document = block_token.Document(fin)
count = self.check_line_numbers(document)
self.assertEqual(count, NUMBER_OF_LINE_NUMBERS_TO_BE_CHECKED)
def check_line_numbers(self, token: block_token.BlockToken):
"""Check the line number on the given block token and its children, if possible."""
count = 0
line_number = self.get_expected_line_number(token)
if line_number:
self.assertEqual(token.line_number, line_number)
count += 1
if isinstance(token, block_token.Table):
count += self.check_line_numbers(token.header)
for child in token.children:
if isinstance(child, block_token.BlockToken):
count += self.check_line_numbers(child)
return count
def get_expected_line_number(self, token: block_token.BlockToken):
# the expected line number, if it exists, should be wrapped in an inline
# code token and be an immediate child of the token.
# or it could be the title of a link reference definition.
for child in token.children:
if isinstance(child, span_token.InlineCode):
return int(child.children[0].content)
if isinstance(child, LinkReferenceDefinition):
return int(child.title)
| TestLineNumbers |
python | anthropics__anthropic-sdk-python | tests/test_client.py | {
"start": 38052,
"end": 75894
} | class ____:
@pytest.mark.respx(base_url=base_url)
async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
respx_mock.post("/foo").mock(
return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
)
response = await async_client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
def test_copy(self, async_client: AsyncAnthropic) -> None:
copied = async_client.copy()
assert id(copied) != id(async_client)
copied = async_client.copy(api_key="another my-anthropic-api-key")
assert copied.api_key == "another my-anthropic-api-key"
assert async_client.api_key == "my-anthropic-api-key"
def test_copy_default_options(self, async_client: AsyncAnthropic) -> None:
# options that have a default are overridden correctly
copied = async_client.copy(max_retries=7)
assert copied.max_retries == 7
assert async_client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
assert isinstance(async_client.timeout, httpx.Timeout)
copied = async_client.copy(timeout=None)
assert copied.timeout is None
assert isinstance(async_client.timeout, httpx.Timeout)
async def test_copy_default_headers(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
assert client.default_headers["X-Foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert copied.default_headers["X-Foo"] == "bar"
# merges already given headers
copied = client.copy(default_headers={"X-Bar": "stainless"})
assert copied.default_headers["X-Foo"] == "bar"
assert copied.default_headers["X-Bar"] == "stainless"
# uses new values for any already given headers
copied = client.copy(default_headers={"X-Foo": "stainless"})
assert copied.default_headers["X-Foo"] == "stainless"
# set_default_headers
# completely overrides already set values
copied = client.copy(set_default_headers={})
assert copied.default_headers.get("X-Foo") is None
copied = client.copy(set_default_headers={"X-Bar": "Robert"})
assert copied.default_headers["X-Bar"] == "Robert"
with pytest.raises(
ValueError,
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
await client.close()
async def test_copy_default_query(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
)
assert _get_params(client)["foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert _get_params(copied)["foo"] == "bar"
# merges already given params
copied = client.copy(default_query={"bar": "stainless"})
params = _get_params(copied)
assert params["foo"] == "bar"
assert params["bar"] == "stainless"
# uses new values for any already given headers
copied = client.copy(default_query={"foo": "stainless"})
assert _get_params(copied)["foo"] == "stainless"
# set_default_query
# completely overrides already set values
copied = client.copy(set_default_query={})
assert _get_params(copied) == {}
copied = client.copy(set_default_query={"bar": "Robert"})
assert _get_params(copied)["bar"] == "Robert"
with pytest.raises(
ValueError,
# TODO: update
match="`default_query` and `set_default_query` arguments are mutually exclusive",
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
await client.close()
def test_copy_signature(self, async_client: AsyncAnthropic) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
async_client.__init__, # type: ignore[misc]
)
copy_signature = inspect.signature(async_client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
if name in exclude_params:
continue
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
def test_copy_build_request(self, async_client: AsyncAnthropic) -> None:
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
client_copy = async_client.copy()
client_copy._build_request(options)
# ensure that the machinery is warmed up before tracing starts.
build_request(options)
gc.collect()
tracemalloc.start(1000)
snapshot_before = tracemalloc.take_snapshot()
ITERATIONS = 10
for _ in range(ITERATIONS):
build_request(options)
gc.collect()
snapshot_after = tracemalloc.take_snapshot()
tracemalloc.stop()
def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None:
if diff.count == 0:
# Avoid false positives by considering only leaks (i.e. allocations that persist).
return
if diff.count % ITERATIONS != 0:
# Avoid false positives by considering only leaks that appear per iteration.
return
for frame in diff.traceback:
if any(
frame.filename.endswith(fragment)
for fragment in [
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
"anthropic/_legacy_response.py",
"anthropic/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
"anthropic/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
):
return
leaks.append(diff)
leaks: list[tracemalloc.StatisticDiff] = []
for diff in snapshot_after.compare_to(snapshot_before, "traceback"):
add_leak(leaks, diff)
if leaks:
for leak in leaks:
print("MEMORY LEAK:", leak)
for frame in leak.traceback:
print(frame)
raise AssertionError()
async def test_request_timeout(self, async_client: AsyncAnthropic) -> None:
request = async_client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
request = async_client._build_request(
FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))
)
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(100.0)
async def test_client_timeout_option(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(0)
await client.close()
async def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
async with httpx.AsyncClient(timeout=None) as http_client:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(None)
await client.close()
# no timeout given to the httpx client should not use the httpx default
async with httpx.AsyncClient() as http_client:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
await client.close()
# explicitly passing the default timeout currently results in it being ignored
async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT # our default
await client.close()
def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
with httpx.Client() as http_client:
AsyncAnthropic(
base_url=base_url,
api_key=api_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
async def test_default_headers_option(self) -> None:
test_client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
test_client2 = AsyncAnthropic(
base_url=base_url,
api_key=api_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
"X-Stainless-Lang": "my-overriding-header",
},
)
request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
await test_client.close()
await test_client2.close()
def test_validate_headers(self) -> None:
client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("X-Api-Key") == api_key
with update_env(**{"ANTHROPIC_API_KEY": Omit()}):
client2 = AsyncAnthropic(base_url=base_url, api_key=None, _strict_response_validation=True)
with pytest.raises(
TypeError,
match="Could not resolve authentication method. Expected either api_key or auth_token to be set. Or for one of the `X-Api-Key` or `Authorization` headers to be explicitly omitted",
):
client2._build_request(FinalRequestOptions(method="get", url="/foo"))
request2 = client2._build_request(FinalRequestOptions(method="get", url="/foo", headers={"X-Api-Key": Omit()}))
assert request2.headers.get("X-Api-Key") is None
async def test_default_query_option(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
assert dict(url.params) == {"query_param": "bar"}
request = client._build_request(
FinalRequestOptions(
method="get",
url="/foo",
params={"foo": "baz", "query_param": "overridden"},
)
)
url = httpx.URL(request.url)
assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
await client.close()
def test_request_extra_json(self, client: Anthropic) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
extra_json={"baz": False},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
extra_json={"baz": False},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar", "baz": True},
extra_json={"baz": None},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
def test_request_extra_headers(self, client: Anthropic) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(extra_headers={"X-Foo": "Foo"}),
),
)
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
request = client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
extra_headers={"X-Bar": "false"},
),
),
)
assert request.headers.get("X-Bar") == "false"
def test_request_extra_query(self, client: Anthropic) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
extra_query={"my_query_param": "Foo"},
),
),
)
params = dict(request.url.params)
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
query={"bar": "1"},
extra_query={"foo": "2"},
),
),
)
params = dict(request.url.params)
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
query={"foo": "1"},
extra_query={"foo": "2"},
),
),
)
params = dict(request.url.params)
assert params == {"foo": "2"}
def test_multipart_repeating_array(self, async_client: AsyncAnthropic) -> None:
request = async_client._build_request(
FinalRequestOptions.construct(
method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
files=[("foo.txt", b"hello world")],
)
)
assert request.read().split(b"\r\n") == [
b"--6b7ba517decee4a450543ea6ae821c82",
b'Content-Disposition: form-data; name="array[]"',
b"",
b"foo",
b"--6b7ba517decee4a450543ea6ae821c82",
b'Content-Disposition: form-data; name="array[]"',
b"",
b"bar",
b"--6b7ba517decee4a450543ea6ae821c82",
b'Content-Disposition: form-data; name="foo.txt"; filename="upload"',
b"Content-Type: application/octet-stream",
b"",
b"hello world",
b"--6b7ba517decee4a450543ea6ae821c82--",
b"",
]
@pytest.mark.respx(base_url=base_url)
async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
class Model1(BaseModel):
name: str
class Model2(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
async def test_union_response_different_types(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
foo: int
class Model2(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model1)
assert response.foo == 1
@pytest.mark.respx(base_url=base_url)
async def test_non_application_json_content_type_for_json_data(
self, respx_mock: MockRouter, async_client: AsyncAnthropic
) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
"""
class Model(BaseModel):
foo: int
respx_mock.get("/foo").mock(
return_value=httpx.Response(
200,
content=json.dumps({"foo": 2}),
headers={"Content-Type": "application/text"},
)
)
response = await async_client.get("/foo", cast_to=Model)
assert isinstance(response, Model)
assert response.foo == 2
async def test_base_url_setter(self) -> None:
client = AsyncAnthropic(
base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True
)
assert client.base_url == "https://example.com/from_init/"
client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
assert client.base_url == "https://example.com/from_setter/"
await client.close()
async def test_base_url_env(self) -> None:
with update_env(ANTHROPIC_BASE_URL="http://localhost:5000/from/env"):
client = AsyncAnthropic(api_key=api_key, _strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
AsyncAnthropic(
base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
),
AsyncAnthropic(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
],
ids=["standard", "custom http client"],
)
async def test_base_url_trailing_slash(self, client: AsyncAnthropic) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
await client.close()
@pytest.mark.parametrize(
"client",
[
AsyncAnthropic(
base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
),
AsyncAnthropic(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
],
ids=["standard", "custom http client"],
)
async def test_base_url_no_trailing_slash(self, client: AsyncAnthropic) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
await client.close()
@pytest.mark.parametrize(
"client",
[
AsyncAnthropic(
base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
),
AsyncAnthropic(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
],
ids=["standard", "custom http client"],
)
async def test_absolute_request_url(self, client: AsyncAnthropic) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
url="https://myapi.com/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "https://myapi.com/foo"
await client.close()
async def test_copied_client_does_not_close_http(self) -> None:
test_client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
assert not test_client.is_closed()
copied = test_client.copy()
assert copied is not test_client
del copied
await asyncio.sleep(0.2)
assert not test_client.is_closed()
async def test_client_context_manager(self) -> None:
test_client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
async with test_client as c2:
assert c2 is test_client
assert not c2.is_closed()
assert not test_client.is_closed()
assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
async def test_client_response_validation_error(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
class Model(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}}))
with pytest.raises(APIResponseValidationError) as exc:
await async_client.get("/foo", cast_to=Model)
assert isinstance(exc.value.__cause__, ValidationError)
async def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
)
@pytest.mark.respx(base_url=base_url)
async def test_default_stream_cls(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
class Model(BaseModel):
name: str
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
stream = await async_client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model])
assert isinstance(stream, AsyncStream)
await stream.response.aclose()
@pytest.mark.respx(base_url=base_url)
async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
name: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
strict_client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
non_strict_client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=False)
response = await non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
await strict_client.close()
await non_strict_client.close()
@pytest.mark.parametrize(
"remaining_retries,retry_after,timeout",
[
[3, "20", 20],
[3, "0", 0.5],
[3, "-10", 0.5],
[3, "60", 60],
[3, "61", 0.5],
[3, "Fri, 29 Sep 2023 16:26:57 GMT", 20],
[3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5],
[3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5],
[3, "Fri, 29 Sep 2023 16:27:37 GMT", 60],
[3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5],
[3, "99999999999999999999999999999999999", 0.5],
[3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5],
[3, "", 0.5],
[2, "", 0.5 * 2.0],
[1, "", 0.5 * 4.0],
[-1100, "", 8], # test large number potentially overflowing
],
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
async def test_parse_retry_after_header(
self, remaining_retries: int, retry_after: str, timeout: float, async_client: AsyncAnthropic
) -> None:
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers)
assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_timeout_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncAnthropic
) -> None:
respx_mock.post("/v1/messages").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
await async_client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
).__aenter__()
assert _get_open_connections(async_client) == 0
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_status_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncAnthropic
) -> None:
respx_mock.post("/v1/messages").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
await async_client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
).__aenter__()
assert _get_open_connections(async_client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
async def test_retries_taken(
self,
async_client: AsyncAnthropic,
failures_before_success: int,
failure_mode: Literal["status", "exception"],
respx_mock: MockRouter,
) -> None:
client = async_client.with_options(max_retries=4)
nb_retries = 0
def retry_handler(_request: httpx.Request) -> httpx.Response:
nonlocal nb_retries
if nb_retries < failures_before_success:
nb_retries += 1
if failure_mode == "exception":
raise RuntimeError("oops")
return httpx.Response(500)
return httpx.Response(200)
respx_mock.post("/v1/messages").mock(side_effect=retry_handler)
response = await client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
)
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_omit_retry_count_header(
self, async_client: AsyncAnthropic, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = async_client.with_options(max_retries=4)
nb_retries = 0
def retry_handler(_request: httpx.Request) -> httpx.Response:
nonlocal nb_retries
if nb_retries < failures_before_success:
nb_retries += 1
return httpx.Response(500)
return httpx.Response(200)
respx_mock.post("/v1/messages").mock(side_effect=retry_handler)
response = await client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
extra_headers={"x-stainless-retry-count": Omit()},
)
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_overwrite_retry_count_header(
self, async_client: AsyncAnthropic, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = async_client.with_options(max_retries=4)
nb_retries = 0
def retry_handler(_request: httpx.Request) -> httpx.Response:
nonlocal nb_retries
if nb_retries < failures_before_success:
nb_retries += 1
return httpx.Response(500)
return httpx.Response(200)
respx_mock.post("/v1/messages").mock(side_effect=retry_handler)
response = await client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
extra_headers={"x-stainless-retry-count": "42"},
)
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retries_taken_new_response_class(
self, async_client: AsyncAnthropic, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = async_client.with_options(max_retries=4)
nb_retries = 0
def retry_handler(_request: httpx.Request) -> httpx.Response:
nonlocal nb_retries
if nb_retries < failures_before_success:
nb_retries += 1
return httpx.Response(500)
return httpx.Response(200)
respx_mock.post("/v1/messages").mock(side_effect=retry_handler)
async with client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
) as response:
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
async def test_get_platform(self) -> None:
platform = await asyncify(get_platform)()
assert isinstance(platform, (str, OtherPlatform))
async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
client = DefaultAsyncHttpxClient()
mounts = tuple(client._mounts.items())
assert len(mounts) == 1
assert mounts[0][0].pattern == "https://"
@pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
async def test_default_client_creation(self) -> None:
# Ensure that the client can be initialized without any exceptions
DefaultAsyncHttpxClient(
verify=True,
cert=None,
trust_env=True,
http1=True,
http2=False,
limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
)
@pytest.mark.respx(base_url=base_url)
async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
# Test that the default follow_redirects=True allows following redirects
respx_mock.post("/redirect").mock(
return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
)
respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
response = await async_client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
assert response.status_code == 200
assert response.json() == {"status": "ok"}
@pytest.mark.respx(base_url=base_url)
async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncAnthropic) -> None:
# Test that follow_redirects=False prevents following redirects
respx_mock.post("/redirect").mock(
return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
)
with pytest.raises(APIStatusError) as exc_info:
await async_client.post(
"/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
)
assert exc_info.value.response.status_code == 302
assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
| TestAsyncAnthropic |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 36414,
"end": 40278
} | class ____(DebertaV2PreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "deberta.embeddings.word_embeddings.weight",
}
_keys_to_ignore_on_load_unexpected = [r"mask_predictions.*"]
def __init__(self, config):
super().__init__(config)
self.legacy = config.legacy
self.deberta = DebertaV2Model(config)
if self.legacy:
self.cls = LegacyDebertaV2OnlyMLMHead(config)
else:
self._tied_weights_keys = {
"lm_predictions.lm_head.weight": "deberta.embeddings.word_embeddings.weight",
}
self.lm_predictions = DebertaV2OnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
if self.legacy:
return self.cls.predictions.decoder
else:
return self.lm_predictions.lm_head.dense
def set_output_embeddings(self, new_embeddings):
if self.legacy:
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
else:
self.lm_predictions.lm_head.dense = new_embeddings
self.lm_predictions.lm_head.bias = new_embeddings.bias
@auto_docstring
# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM.forward with Deberta->DebertaV2
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
if self.legacy:
prediction_scores = self.cls(sequence_output)
else:
prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
| DebertaV2ForMaskedLM |
python | qdrant__qdrant-client | qdrant_client/local/payload_value_setter.py | {
"start": 2871,
"end": 4809
} | class ____(Setter):
TYPE = dict
@classmethod
def _set_compatible_types(
cls,
data: Any,
current_key: JsonPathItem,
k_list: list[JsonPathItem],
value: dict[str, Any],
) -> None:
if current_key.key not in data:
data[current_key.key] = {}
if len(k_list) == 0:
if isinstance(data[current_key.key], dict):
data[current_key.key].update(value)
else:
data[current_key.key] = value
else:
cls.set(data[current_key.key], k_list.copy(), value, data, current_key)
@classmethod
def _set_incompatible_types(
cls,
current_key: JsonPathItem,
k_list: list[JsonPathItem],
value: dict[str, Any],
prev_data: Any,
prev_key: Optional[JsonPathItem],
) -> None:
assert prev_key is not None
if len(k_list) == 0:
if prev_key.item_type == JsonPathItemType.KEY:
prev_data[prev_key.key] = {current_key.key: value}
else: # if prev key was WILDCARD, we need to pass INDEX instead with an index set
prev_data[prev_key.index] = {current_key.key: value}
else:
if prev_key.item_type == JsonPathItemType.KEY:
prev_data[prev_key.key] = {current_key.key: {}}
cls.set(
prev_data[prev_key.key][current_key.key],
k_list.copy(),
value,
prev_data[prev_key.key],
current_key,
)
else:
prev_data[prev_key.index] = {current_key.key: {}}
cls.set(
prev_data[prev_key.index][current_key.key],
k_list.copy(),
value,
prev_data[prev_key.index],
current_key,
)
| KeySetter |
python | pypa__pip | src/pip/_vendor/distro/distro.py | {
"start": 1742,
"end": 1826
} | class ____(TypedDict):
major: str
minor: str
build_number: str
| VersionDict |
python | django__django | django/db/models/sql/query.py | {
"start": 116447,
"end": 120950
} | class ____:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if rel_a join doesn't produce any results is null (for example
# reverse foreign key or null value in direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == OR and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == AND or (
self.effective_connector == OR and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
| JoinPromoter |
python | astropy__astropy | astropy/io/ascii/ecsv.py | {
"start": 9979,
"end": 16745
} | class ____(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
return serialize._construct_mixins_from_columns(out)
def _convert_vals(self, cols):
"""READ: Convert str_vals in `cols` to final arrays with correct dtypes.
This is adapted from ``BaseOutputter._convert_vals``. In the case of ECSV
there is no guessing and all types are known in advance. A big change
is handling the possibility of JSON-encoded values, both unstructured
object data and structured values that may contain masked data.
"""
for col in cols:
try:
# 1-d or N-d object columns are serialized as JSON.
if col.subtype == "object":
_check_dtype_is_str(col)
col_vals = [json.loads(val) for val in col.str_vals]
col.data = np.empty([len(col_vals)] + col.shape, dtype=object)
col.data[...] = col_vals
# Variable length arrays with shape (n, m, ..., *) for fixed
# n, m, .. and variable in last axis. Masked values here are
# not currently supported.
elif col.shape and col.shape[-1] is None:
_check_dtype_is_str(col)
# Empty (blank) values in original ECSV are changed to "0"
# in str_vals with corresponding col.mask being created and
# set accordingly. Instead use an empty list here.
if hasattr(col, "mask"):
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = "[]"
# Remake as a 1-d object column of numpy ndarrays or
# MaskedArray using the datatype specified in the ECSV file.
col_vals = []
for str_val in col.str_vals:
obj_val = json.loads(str_val) # list or nested lists
try:
arr_val = np.array(obj_val, dtype=col.subtype)
except TypeError:
# obj_val has entries that are inconsistent with
# dtype. For a valid ECSV file the only possibility
# is None values (indicating missing values).
data = np.array(obj_val, dtype=object)
# Replace all the None with an appropriate fill value
mask = data == None
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
arr_val = np.ma.array(data.astype(col.subtype), mask=mask)
col_vals.append(arr_val)
col.shape = ()
col.dtype = np.dtype(object)
# np.array(col_vals_arr, dtype=object) fails ?? so this workaround:
col.data = np.empty(len(col_vals), dtype=object)
col.data[:] = col_vals
# Multidim columns with consistent shape (n, m, ...). These
# might be masked.
elif col.shape:
_check_dtype_is_str(col)
# Change empty (blank) values in original ECSV to something
# like "[[null, null],[null,null]]" so subsequent JSON
# decoding works. Delete `col.mask` so that later code in
# core TableOutputter.__call__() that deals with col.mask
# does not run (since handling is done here already).
if hasattr(col, "mask"):
all_none_arr = np.full(
shape=col.shape, fill_value=None, dtype=object
)
all_none_json = json.dumps(all_none_arr.tolist())
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = all_none_json
del col.mask
col_vals = [json.loads(val) for val in col.str_vals]
# Make a numpy object array of col_vals to look for None
# (masked values)
data = np.array(col_vals, dtype=object)
mask = data == None
if not np.any(mask):
# No None's, just convert to required dtype
col.data = data.astype(col.subtype)
else:
# Replace all the None with an appropriate fill value
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
# Finally make a MaskedArray with the filled data + mask
col.data = np.ma.array(data.astype(col.subtype), mask=mask)
# Regular scalar value column
else:
if col.subtype:
warnings.warn(
f"unexpected subtype {col.subtype!r} set for column "
f"{col.name!r}, using dtype={col.dtype!r} instead.",
category=InvalidEcsvDatatypeWarning,
)
converter_func, _ = convert_numpy(col.dtype)
col.data = converter_func(col.str_vals)
if col.data.shape[1:] != tuple(col.shape):
raise ValueError(
"shape mismatch between value and column specifier"
)
except json.JSONDecodeError:
raise ValueError(
f"column {col.name!r} failed to convert: "
"column value is not valid JSON"
)
except Exception as exc:
raise ValueError(f"column {col.name!r} failed to convert: {exc}")
| EcsvOutputter |
python | getsentry__sentry | src/sentry/analytics/events/rule_snooze.py | {
"start": 289,
"end": 397
} | class ____(RuleSnoozeAction):
until: str | None = None
@analytics.eventclass("rule.unsnoozed")
| RuleSnoozed |
python | arrow-py__arrow | arrow/locales.py | {
"start": 56353,
"end": 57654
} | class ____(Locale):
names = ["az", "az-az"]
past = "{0} əvvəl"
future = "{0} sonra"
timeframes = {
"now": "indi",
"second": "bir saniyə",
"seconds": "{0} saniyə",
"minute": "bir dəqiqə",
"minutes": "{0} dəqiqə",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"week": "bir həftə",
"weeks": "{0} həftə",
"month": "bir ay",
"months": "{0} ay",
"year": "bir il",
"years": "{0} il",
}
month_names = [
"",
"Yanvar",
"Fevral",
"Mart",
"Aprel",
"May",
"İyun",
"İyul",
"Avqust",
"Sentyabr",
"Oktyabr",
"Noyabr",
"Dekabr",
]
month_abbreviations = [
"",
"Yan",
"Fev",
"Mar",
"Apr",
"May",
"İyn",
"İyl",
"Avq",
"Sen",
"Okt",
"Noy",
"Dek",
]
day_names = [
"",
"Bazar ertəsi",
"Çərşənbə axşamı",
"Çərşənbə",
"Cümə axşamı",
"Cümə",
"Şənbə",
"Bazar",
]
day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"]
| AzerbaijaniLocale |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_helper_functions.py | {
"start": 447,
"end": 3949
} | class ____:
"""Test helper functions from check commands."""
def test_validate_single_symbol_success(self):
"""Test _validate_single_symbol with a valid symbol."""
# Test with automation symbol that should be valid
result = _validate_single_symbol("automation.dagster_docs.validator.SymbolImporter")
# Should return 0 for success
assert result == 0
def test_validate_single_symbol_with_invalid_symbol(self):
"""Test _validate_single_symbol with invalid symbol."""
# Should return non-zero exit code for nonexistent symbol (not raise exception)
# The function handles the exception internally and returns an exit code
result = _validate_single_symbol("nonexistent.symbol")
assert result == 1 # Should return 1 for failure
def test_validate_package_symbols_success(self):
"""Test _validate_package_symbols with a valid package."""
# Test with automation package
result = _validate_package_symbols("automation.dagster_docs")
# Should return 0 or 1 (success or validation errors, but not crash)
assert result in [0, 1]
def test_validate_package_symbols_nonexistent_package(self):
"""Test _validate_package_symbols with nonexistent package."""
# Should raise ImportError for nonexistent package
with pytest.raises(ImportError):
_validate_package_symbols("nonexistent.package")
def test_find_git_root_current_repo(self):
"""Test _find_git_root in current repository (which should be a git repo)."""
result = _find_git_root()
# Should return a Path object since we're in a git repo
assert result is not None
assert isinstance(result, Path)
assert (result / ".git").exists()
@patch("automation.dagster_docs.commands.check.Path.cwd")
def test_find_git_root_not_found(self, mock_cwd):
"""Test _find_git_root when not in a git repository."""
# Create a mock path that doesn't have .git and is at filesystem root
mock_root = Mock(spec=Path)
mock_root.parent = mock_root # Simulate filesystem root
# Configure the __truediv__ method to return a mock .git directory
mock_git_dir = Mock()
mock_git_dir.exists.return_value = False
mock_root.__truediv__ = Mock(return_value=mock_git_dir)
mock_cwd.return_value = mock_root
result = _find_git_root()
# Should return None when no git repo found
assert result is None
@patch("automation.dagster_docs.commands.check._find_git_root")
def test_validate_changed_files_no_git_repo(self, mock_find_git_root):
"""Test _validate_changed_files when not in git repo."""
mock_find_git_root.return_value = None
result = _validate_changed_files()
# Should return 2 for "not in git repo" error
assert result == 2
@patch("automation.dagster_docs.commands.check.git_changed_files")
@patch("automation.dagster_docs.commands.check._find_git_root")
def test_validate_changed_files_no_changes(self, mock_find_git_root, mock_git_changed_files):
"""Test _validate_changed_files when no files are changed."""
mock_find_git_root.return_value = Path("/fake/git/root")
mock_git_changed_files.return_value = []
result = _validate_changed_files()
# Should return 0 for success when no changed files
assert result == 0
| TestCheckHelperFunctions |
python | protocolbuffers__protobuf | python/google/protobuf/internal/reflection_test.py | {
"start": 35740,
"end": 90326
} | class ____(unittest.TestCase):
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEqual(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEqual(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEqual(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertFalse(proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertFalse(proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
# Helper used to test foreign messages and groups.
#
# composite_field_name should be the name of a non-repeated
# composite (i.e., foreign or group) field in TestAllTypes,
# and scalar_field_name should be the name of an integer-valued
# scalar field within that composite.
#
# I never thought I'd miss C++ macros and templates so much. :(
# This helper is semantically just:
#
# assert proto.composite_field.scalar_field == 0
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
#
# proto.composite_field.scalar_field = 10
# old_composite_field = proto.composite_field
#
# assert proto.composite_field.scalar_field == 10
# assert proto.composite_field.HasField('scalar_field')
# assert proto.HasField('composite_field')
#
# proto.ClearField('composite_field')
#
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
# assert proto.composite_field.scalar_field == 0
#
# # Now ensure that ClearField('composite_field') disconnected
# # the old field object from the object tree...
# assert old_composite_field is not proto.composite_field
# old_composite_field.scalar_field = 20
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
def TestCompositeHasBits(composite_field_name, scalar_field_name):
proto = unittest_pb2.TestAllTypes()
# First, check that we can get the scalar value, and see that it's the
# default (0), but that proto.HasField('omposite') and
# proto.composite.HasField('scalar') will still return False.
composite_field = getattr(proto, composite_field_name)
original_scalar_value = getattr(composite_field, scalar_field_name)
self.assertEqual(0, original_scalar_value)
# Assert that the composite object does not "have" the scalar.
self.assertFalse(composite_field.HasField(scalar_field_name))
# Assert that proto does not "have" the composite field.
self.assertFalse(proto.HasField(composite_field_name))
# Now set the scalar within the composite field. Ensure that the setting
# is reflected, and that proto.HasField('composite') and
# proto.composite.HasField('scalar') now both return True.
new_val = 20
setattr(composite_field, scalar_field_name, new_val)
self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
# Hold on to a reference to the current composite_field object.
old_composite_field = composite_field
# Assert that the has methods now return true.
self.assertTrue(composite_field.HasField(scalar_field_name))
self.assertTrue(proto.HasField(composite_field_name))
# Now call the clear method...
proto.ClearField(composite_field_name)
# ...and ensure that the "has" bits are all back to False...
composite_field = getattr(proto, composite_field_name)
self.assertFalse(composite_field.HasField(scalar_field_name))
self.assertFalse(proto.HasField(composite_field_name))
# ...and ensure that the scalar field has returned to its default.
self.assertEqual(0, getattr(composite_field, scalar_field_name))
self.assertIsNot(old_composite_field, composite_field)
setattr(old_composite_field, scalar_field_name, new_val)
self.assertFalse(composite_field.HasField(scalar_field_name))
self.assertFalse(proto.HasField(composite_field_name))
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Test simple, single-level nesting when we set a scalar.
TestCompositeHasBits('optionalgroup', 'a')
TestCompositeHasBits('optional_nested_message', 'bb')
TestCompositeHasBits('optional_foreign_message', 'c')
TestCompositeHasBits('optional_import_message', 'd')
def testHasBitsWhenModifyingRepeatedFields(self):
# Test nesting when we add an element to a repeated field in a submessage.
proto = unittest_pb2.TestNestedMessageHasBits()
proto.optional_nested_message.nestedmessage_repeated_int32.append(5)
self.assertEqual(
[5], proto.optional_nested_message.nestedmessage_repeated_int32)
self.assertTrue(proto.HasField('optional_nested_message'))
# Do the same test, but with a repeated composite field within the
# submessage.
proto.ClearField('optional_nested_message')
self.assertFalse(proto.HasField('optional_nested_message'))
proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
self.assertTrue(proto.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
# Test nesting many levels deep.
recursive_proto = unittest_pb2.TestMutualRecursionA()
self.assertFalse(recursive_proto.HasField('bb'))
self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertFalse(recursive_proto.HasField('bb'))
recursive_proto.bb.a.bb.a.bb.optional_int32 = 5
self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(recursive_proto.HasField('bb'))
self.assertTrue(recursive_proto.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.HasField('bb'))
self.assertTrue(recursive_proto.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb'))
self.assertFalse(recursive_proto.bb.a.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1
proto.Extensions[unittest_pb2.optional_int32_extension ] = 5
proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo'
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 5),
(unittest_pb2.optional_fixed32_extension, 1),
(unittest_pb2.optional_string_extension , 'foo') ],
proto.ListFields())
del proto.Extensions[unittest_pb2.optional_fixed32_extension]
self.assertEqual(
[(unittest_pb2.optional_int32_extension, 5),
(unittest_pb2.optional_string_extension, 'foo')],
proto.ListFields())
def testRepeatedListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(5)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(11)
proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz')
proto.Extensions[unittest_pb2.optional_int32_extension ] = 21
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 21),
(unittest_pb2.repeated_int32_extension , [5, 11]),
(unittest_pb2.repeated_fixed32_extension, [1]),
(unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ],
proto.ListFields())
del proto.Extensions[unittest_pb2.repeated_int32_extension]
del proto.Extensions[unittest_pb2.repeated_string_extension]
self.assertEqual(
[(unittest_pb2.optional_int32_extension, 21),
(unittest_pb2.repeated_fixed32_extension, [1])],
proto.ListFields())
def testListFieldsAndExtensions(self):
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
unittest_pb2.my_extension_int
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['my_int' ], 1),
(unittest_pb2.my_extension_int , 23),
(proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'),
(unittest_pb2.my_extension_string , 'bar'),
(proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ],
proto.ListFields())
def testDefaultValues(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
self.assertEqual(0, proto.optional_int64)
self.assertEqual(0, proto.optional_uint32)
self.assertEqual(0, proto.optional_uint64)
self.assertEqual(0, proto.optional_sint32)
self.assertEqual(0, proto.optional_sint64)
self.assertEqual(0, proto.optional_fixed32)
self.assertEqual(0, proto.optional_fixed64)
self.assertEqual(0, proto.optional_sfixed32)
self.assertEqual(0, proto.optional_sfixed64)
self.assertEqual(0.0, proto.optional_float)
self.assertEqual(0.0, proto.optional_double)
self.assertEqual(False, proto.optional_bool)
self.assertEqual('', proto.optional_string)
self.assertEqual(b'', proto.optional_bytes)
self.assertEqual(41, proto.default_int32)
self.assertEqual(42, proto.default_int64)
self.assertEqual(43, proto.default_uint32)
self.assertEqual(44, proto.default_uint64)
self.assertEqual(-45, proto.default_sint32)
self.assertEqual(46, proto.default_sint64)
self.assertEqual(47, proto.default_fixed32)
self.assertEqual(48, proto.default_fixed64)
self.assertEqual(49, proto.default_sfixed32)
self.assertEqual(-50, proto.default_sfixed64)
self.assertEqual(51.5, proto.default_float)
self.assertEqual(52e3, proto.default_double)
self.assertEqual(True, proto.default_bool)
self.assertEqual('hello', proto.default_string)
self.assertEqual(b'world', proto.default_bytes)
self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_BAR,
proto.default_import_enum)
proto = unittest_pb2.TestExtremeDefaultValues()
self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.HasField, 'nonexistent_field')
def testClearRemovesChildren(self):
# Make sure there aren't any implementation bugs that are only partially
# clearing the message (which can happen in the more complex C++
# implementation which has parallel message lists).
proto = unittest_pb2.TestRequiredForeign()
for i in range(10):
proto.repeated_message.add()
proto2 = unittest_pb2.TestRequiredForeign()
proto.CopyFrom(proto2)
self.assertRaises(IndexError, lambda: proto.repeated_message[5])
def testSingleScalarClearField(self):
proto = unittest_pb2.TestAllTypes()
# Should be allowed to clear something that's not there (a no-op).
proto.ClearField('optional_int32')
proto.optional_int32 = 1
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
self.assertEqual(0, proto.optional_int32)
self.assertFalse(proto.HasField('optional_int32'))
# TODO: Test all other scalar field types.
def testRepeatedScalars(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(15)
self.assertTrue(proto.repeated_int32)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual([5, 10, 15], proto.repeated_int32)
# Test single retrieval.
self.assertEqual(5, proto.repeated_int32[0])
self.assertEqual(15, proto.repeated_int32[-1])
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
# Test single assignment.
proto.repeated_int32[1] = 20
self.assertEqual([5, 20, 15], proto.repeated_int32)
# Test insertion.
proto.repeated_int32.insert(1, 25)
self.assertEqual([5, 25, 20, 15], proto.repeated_int32)
# Test slice retrieval.
proto.repeated_int32.append(30)
self.assertEqual([25, 20, 15], proto.repeated_int32[1:4])
self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
# Test slice assignment with an iterator
proto.repeated_int32[1:4] = (i for i in range(3))
self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
# Test slice assignment.
proto.repeated_int32[1:4] = [35, 40, 45]
self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32)
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_int32:
result.append(i)
self.assertEqual([5, 35, 40, 45, 30], result)
# Test single deletion.
del proto.repeated_int32[2]
self.assertEqual([5, 35, 45, 30], proto.repeated_int32)
# Test slice deletion.
del proto.repeated_int32[2:]
self.assertEqual([5, 35], proto.repeated_int32)
# Test extending.
proto.repeated_int32.extend([3, 13])
self.assertEqual([5, 35, 3, 13], proto.repeated_int32)
# Test clearing.
proto.ClearField('repeated_int32')
self.assertFalse(proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(1)
self.assertEqual(1, proto.repeated_int32[-1])
# Test assignment to a negative index.
proto.repeated_int32[-1] = 2
self.assertEqual(2, proto.repeated_int32[-1])
# Test deletion at negative indices.
proto.repeated_int32[:] = [0, 1, 2, 3]
del proto.repeated_int32[-1]
self.assertEqual([0, 1, 2], proto.repeated_int32)
del proto.repeated_int32[-2]
self.assertEqual([0, 2], proto.repeated_int32)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300)
del proto.repeated_int32[-2:-1]
self.assertEqual([2], proto.repeated_int32)
del proto.repeated_int32[100:10000]
self.assertEqual([2], proto.repeated_int32)
  def testRepeatedScalarsRemove(self):
    """remove() deletes the first matching element and raises on a miss."""
    proto = unittest_pb2.TestAllTypes()
    self.assertFalse(proto.repeated_int32)
    self.assertEqual(0, len(proto.repeated_int32))
    proto.repeated_int32.append(5)
    proto.repeated_int32.append(10)
    proto.repeated_int32.append(5)
    proto.repeated_int32.append(5)
    self.assertEqual(4, len(proto.repeated_int32))
    # Only the first occurrence of 5 is removed; later duplicates remain.
    proto.repeated_int32.remove(5)
    self.assertEqual(3, len(proto.repeated_int32))
    self.assertEqual(10, proto.repeated_int32[0])
    self.assertEqual(5, proto.repeated_int32[1])
    self.assertEqual(5, proto.repeated_int32[2])
    proto.repeated_int32.remove(5)
    self.assertEqual(2, len(proto.repeated_int32))
    self.assertEqual(10, proto.repeated_int32[0])
    self.assertEqual(5, proto.repeated_int32[1])
    proto.repeated_int32.remove(10)
    self.assertEqual(1, len(proto.repeated_int32))
    self.assertEqual(5, proto.repeated_int32[0])
    # Remove a non-existent element.
    self.assertRaises(ValueError, proto.repeated_int32.remove, 123)

  def testRepeatedScalarsReverse_Empty(self):
    """reverse() on an empty repeated scalar is a no-op and returns None."""
    proto = unittest_pb2.TestAllTypes()
    self.assertFalse(proto.repeated_int32)
    self.assertEqual(0, len(proto.repeated_int32))
    self.assertIsNone(proto.repeated_int32.reverse())
    self.assertFalse(proto.repeated_int32)
    self.assertEqual(0, len(proto.repeated_int32))

  def testRepeatedScalarsReverse_NonEmpty(self):
    """reverse() flips element order in place (list-style, returns None)."""
    proto = unittest_pb2.TestAllTypes()
    self.assertFalse(proto.repeated_int32)
    self.assertEqual(0, len(proto.repeated_int32))
    proto.repeated_int32.append(1)
    proto.repeated_int32.append(2)
    proto.repeated_int32.append(3)
    proto.repeated_int32.append(4)
    self.assertEqual(4, len(proto.repeated_int32))
    self.assertIsNone(proto.repeated_int32.reverse())
    self.assertEqual(4, len(proto.repeated_int32))
    self.assertEqual(4, proto.repeated_int32[0])
    self.assertEqual(3, proto.repeated_int32[1])
    self.assertEqual(2, proto.repeated_int32[2])
    self.assertEqual(1, proto.repeated_int32[3])
  def testRepeatedComposites(self):
    """Repeated message fields support list-like access, slicing and add()."""
    proto = unittest_pb2.TestAllTypes()
    self.assertFalse(proto.repeated_nested_message)
    self.assertEqual(0, len(proto.repeated_nested_message))
    m0 = proto.repeated_nested_message.add()
    m1 = proto.repeated_nested_message.add()
    self.assertTrue(proto.repeated_nested_message)
    self.assertEqual(2, len(proto.repeated_nested_message))
    self.assertListsEqual([m0, m1], proto.repeated_nested_message)
    self.assertIsInstance(m0, unittest_pb2.TestAllTypes.NestedMessage)

    # Test out-of-bounds indices.
    self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
                      1234)
    self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
                      -1234)

    # Test incorrect types passed to __getitem__.
    self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
                      'foo')
    self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
                      None)

    # Test slice retrieval.
    m2 = proto.repeated_nested_message.add()
    m3 = proto.repeated_nested_message.add()
    m4 = proto.repeated_nested_message.add()
    self.assertListsEqual(
        [m1, m2, m3], proto.repeated_nested_message[1:4])
    self.assertListsEqual(
        [m0, m1, m2, m3, m4], proto.repeated_nested_message[:])
    self.assertListsEqual(
        [m0, m1], proto.repeated_nested_message[:2])
    self.assertListsEqual(
        [m2, m3, m4], proto.repeated_nested_message[2:])
    self.assertEqual(
        m0, proto.repeated_nested_message[0])
    self.assertListsEqual(
        [m0], proto.repeated_nested_message[:1])

    # Test that we can use the field as an iterator.
    result = []
    for i in proto.repeated_nested_message:
      result.append(i)
    self.assertListsEqual([m0, m1, m2, m3, m4], result)

    # Test single deletion.
    del proto.repeated_nested_message[2]
    self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message)

    # Test slice deletion.
    del proto.repeated_nested_message[2:]
    self.assertListsEqual([m0, m1], proto.repeated_nested_message)

    # Test extending.
    n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1)
    n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2)
    proto.repeated_nested_message.extend([n1,n2])
    self.assertEqual(4, len(proto.repeated_nested_message))
    self.assertEqual(n1, proto.repeated_nested_message[2])
    self.assertEqual(n2, proto.repeated_nested_message[3])
    # extend() requires an iterable of messages of the right type.
    self.assertRaises(TypeError,
                      proto.repeated_nested_message.extend, n1)
    self.assertRaises(TypeError,
                      proto.repeated_nested_message.extend, [0])
    wrong_message_type = unittest_pb2.TestAllTypes()
    self.assertRaises(TypeError,
                      proto.repeated_nested_message.extend,
                      [wrong_message_type])

    # Test clearing.
    proto.ClearField('repeated_nested_message')
    self.assertFalse(proto.repeated_nested_message)
    self.assertEqual(0, len(proto.repeated_nested_message))

    # Test constructing an element while adding it.
    proto.repeated_nested_message.add(bb=23)
    self.assertEqual(1, len(proto.repeated_nested_message))
    self.assertEqual(23, proto.repeated_nested_message[0].bb)
    self.assertRaises(TypeError, proto.repeated_nested_message.add, 23)
    # Direct item assignment of a non-message is rejected.
    with self.assertRaises(Exception):
      proto.repeated_nested_message[0] = 23
  def testRepeatedCompositeRemove(self):
    """remove() on a repeated message field removes by value equality."""
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(0, len(proto.repeated_nested_message))
    m0 = proto.repeated_nested_message.add()
    # Need to set some differentiating variable so m0 != m1 != m2:
    m0.bb = len(proto.repeated_nested_message)
    m1 = proto.repeated_nested_message.add()
    m1.bb = len(proto.repeated_nested_message)
    self.assertTrue(m0 != m1)
    m2 = proto.repeated_nested_message.add()
    m2.bb = len(proto.repeated_nested_message)
    self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)

    self.assertEqual(3, len(proto.repeated_nested_message))
    proto.repeated_nested_message.remove(m0)
    self.assertEqual(2, len(proto.repeated_nested_message))
    self.assertEqual(m1, proto.repeated_nested_message[0])
    self.assertEqual(m2, proto.repeated_nested_message[1])

    # Removing m0 again or removing None should raise error
    self.assertRaises(ValueError, proto.repeated_nested_message.remove, m0)
    self.assertRaises(ValueError, proto.repeated_nested_message.remove, None)
    self.assertEqual(2, len(proto.repeated_nested_message))

    proto.repeated_nested_message.remove(m2)
    self.assertEqual(1, len(proto.repeated_nested_message))
    self.assertEqual(m1, proto.repeated_nested_message[0])

  def testRepeatedCompositeReverse_Empty(self):
    """reverse() on an empty repeated message field is a no-op."""
    proto = unittest_pb2.TestAllTypes()
    self.assertFalse(proto.repeated_nested_message)
    self.assertEqual(0, len(proto.repeated_nested_message))
    self.assertIsNone(proto.repeated_nested_message.reverse())
    self.assertFalse(proto.repeated_nested_message)
    self.assertEqual(0, len(proto.repeated_nested_message))

  def testRepeatedCompositeReverse_NonEmpty(self):
    """reverse() flips the order of repeated submessages in place."""
    proto = unittest_pb2.TestAllTypes()
    self.assertFalse(proto.repeated_nested_message)
    self.assertEqual(0, len(proto.repeated_nested_message))
    m0 = proto.repeated_nested_message.add()
    m0.bb = len(proto.repeated_nested_message)
    m1 = proto.repeated_nested_message.add()
    m1.bb = len(proto.repeated_nested_message)
    m2 = proto.repeated_nested_message.add()
    m2.bb = len(proto.repeated_nested_message)
    self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)
    self.assertIsNone(proto.repeated_nested_message.reverse())
    self.assertListsEqual([m2, m1, m0], proto.repeated_nested_message)
  @testing_refleaks.SkipReferenceLeakChecker('This leaks in pure python')
  def testHandWrittenReflection(self):
    """A message class built from hand-constructed descriptors works."""
    # Hand written extensions are only supported by the pure-Python
    # implementation of the API.
    if api_implementation.Type() != 'python':
      return

    # Build a file/field/message descriptor hierarchy entirely by hand.
    file = descriptor.FileDescriptor(name='foo.proto', package='')
    FieldDescriptor = descriptor.FieldDescriptor
    foo_field_descriptor = FieldDescriptor(
        name='foo_field', full_name='MyProto.foo_field',
        index=0, number=1, type=FieldDescriptor.TYPE_INT64,
        cpp_type=FieldDescriptor.CPPTYPE_INT64,
        label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
        containing_type=None, message_type=None, enum_type=None,
        is_extension=False, extension_scope=None,
        options=descriptor_pb2.FieldOptions(), file=file,
        # pylint: disable=protected-access
        create_key=descriptor._internal_create_key)
    mydescriptor = descriptor.Descriptor(
        name='MyProto', full_name='MyProto', filename='ignored',
        containing_type=None, nested_types=[], enum_types=[],
        fields=[foo_field_descriptor], extensions=[],
        options=descriptor_pb2.MessageOptions(),
        file=file,
        # pylint: disable=protected-access
        create_key=descriptor._internal_create_key)

    # The metaclass generates a working message class from DESCRIPTOR.
    class MyProtoClass(
        message.Message, metaclass=reflection.GeneratedProtocolMessageType):
      DESCRIPTOR = mydescriptor
    myproto_instance = MyProtoClass()
    self.assertEqual(0, myproto_instance.foo_field)
    self.assertFalse(myproto_instance.HasField('foo_field'))
    myproto_instance.foo_field = 23
    self.assertEqual(23, myproto_instance.foo_field)
    self.assertTrue(myproto_instance.HasField('foo_field'))
  @testing_refleaks.SkipReferenceLeakChecker('MakeDescriptor is not repeatable')
  def testDescriptorProtoSupport(self):
    """MakeDescriptor on a DescriptorProto yields a usable message class."""
    def AddDescriptorField(proto, field_name, field_type):
      # Helper: appends an optional field, numbering fields sequentially
      # via the function attribute `field_index`.
      AddDescriptorField.field_index += 1
      new_field = proto.field.add()
      new_field.name = field_name
      new_field.type = field_type
      new_field.number = AddDescriptorField.field_index
      new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

    AddDescriptorField.field_index = 0

    desc_proto = descriptor_pb2.DescriptorProto()
    desc_proto.name = 'Car'
    fdp = descriptor_pb2.FieldDescriptorProto
    AddDescriptorField(desc_proto, 'name', fdp.TYPE_STRING)
    AddDescriptorField(desc_proto, 'year', fdp.TYPE_INT64)
    AddDescriptorField(desc_proto, 'automatic', fdp.TYPE_BOOL)
    AddDescriptorField(desc_proto, 'price', fdp.TYPE_DOUBLE)
    # Add a repeated field
    AddDescriptorField.field_index += 1
    new_field = desc_proto.field.add()
    new_field.name = 'owners'
    new_field.type = fdp.TYPE_STRING
    new_field.number = AddDescriptorField.field_index
    new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED

    desc = descriptor.MakeDescriptor(desc_proto)
    self.assertTrue('name' in desc.fields_by_name)
    self.assertTrue('year' in desc.fields_by_name)
    self.assertTrue('automatic' in desc.fields_by_name)
    self.assertTrue('price' in desc.fields_by_name)
    self.assertTrue('owners' in desc.fields_by_name)

    class CarMessage(
        message.Message, metaclass=reflection.GeneratedProtocolMessageType):
      DESCRIPTOR = desc

    prius = CarMessage()
    prius.name = 'prius'
    prius.year = 2010
    prius.automatic = True
    prius.price = 25134.75
    prius.owners.extend(['bob', 'susan'])

    # Round-trip through serialization using a factory-created class.
    serialized_prius = prius.SerializeToString()
    new_prius = message_factory.GetMessageClass(desc)()
    new_prius.ParseFromString(serialized_prius)
    self.assertIsNot(new_prius, prius)
    self.assertEqual(prius, new_prius)

    # these are unnecessary assuming message equality works as advertised but
    # explicitly check to be safe since we're mucking about in metaclass foo
    self.assertEqual(prius.name, new_prius.name)
    self.assertEqual(prius.year, new_prius.year)
    self.assertEqual(prius.automatic, new_prius.automatic)
    self.assertEqual(prius.price, new_prius.price)
    self.assertEqual(prius.owners, new_prius.owners)
  def testExtensionDelete(self):
    """`del msg.Extensions[ext]` clears an extension; absent ones are OK."""
    extendee_proto = more_extensions_pb2.ExtendedMessage()

    extension_int32 = more_extensions_pb2.optional_int_extension
    extendee_proto.Extensions[extension_int32] = 23

    extension_repeated = more_extensions_pb2.repeated_int_extension
    extendee_proto.Extensions[extension_repeated].append(11)

    extension_msg = more_extensions_pb2.optional_message_extension
    extendee_proto.Extensions[extension_msg].foreign_message_int = 56

    self.assertEqual(len(extendee_proto.Extensions), 3)
    del extendee_proto.Extensions[extension_msg]
    self.assertEqual(len(extendee_proto.Extensions), 2)
    del extendee_proto.Extensions[extension_repeated]
    self.assertEqual(len(extendee_proto.Extensions), 1)
    # Delete a none exist extension. It is OK to "del m.Extensions[ext]"
    # even if the extension is not present in the message; we don't
    # raise KeyError. This is consistent with "m.Extensions[ext]"
    # returning a default value even if we did not set anything.
    del extendee_proto.Extensions[extension_repeated]
    self.assertEqual(len(extendee_proto.Extensions), 1)

    del extendee_proto.Extensions[extension_int32]
    self.assertEqual(len(extendee_proto.Extensions), 0)
  @unittest.skipIf(
      api_implementation.Type() == 'upb',
      'This test relies on a specific iteration order for extensions, '
      'which is not reasonable to guarantee.',
  )
  def testExtensionIter(self):
    """Iterating Extensions yields only set extensions, not regular fields."""
    extendee_proto = more_extensions_pb2.ExtendedMessage()

    extension_int32 = more_extensions_pb2.optional_int_extension
    extendee_proto.Extensions[extension_int32] = 23

    extension_repeated = more_extensions_pb2.repeated_int_extension
    extendee_proto.Extensions[extension_repeated].append(11)

    extension_msg = more_extensions_pb2.optional_message_extension
    extendee_proto.Extensions[extension_msg].foreign_message_int = 56

    # Set some normal fields.
    extendee_proto.optional_int32 = 1
    extendee_proto.repeated_string.append('hi')

    # NOTE(review): `expected` encodes the implementation's iteration order
    # (the test is skipped under upb for exactly that reason).
    expected = (extension_int32, extension_msg, extension_repeated)
    count = 0
    for item in extendee_proto.Extensions:
      self.assertEqual(item.name, expected[count].name)
      self.assertIn(item, extendee_proto.Extensions)
      count += 1
    self.assertEqual(count, 3)
def testExtensionContainsError(self):
extendee_proto = more_extensions_pb2.ExtendedMessage()
self.assertRaises(KeyError, extendee_proto.Extensions.__contains__, 0)
field = more_extensions_pb2.ExtendedMessage.DESCRIPTOR.fields_by_name[
'optional_int32']
self.assertRaises(KeyError, extendee_proto.Extensions.__contains__, field)
  def testTopLevelExtensionsForOptionalScalar(self):
    """Scalar extensions: reads don't set presence, writes and Clear do."""
    extendee_proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.optional_int32_extension
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    self.assertEqual(0, extendee_proto.Extensions[extension])
    # As with normal scalar fields, just doing a read doesn't actually set the
    # "has" bit.
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    # Actually set the thing.
    extendee_proto.Extensions[extension] = 23
    self.assertEqual(23, extendee_proto.Extensions[extension])
    self.assertTrue(extendee_proto.HasExtension(extension))
    self.assertIn(extension, extendee_proto.Extensions)
    # Ensure that clearing works as well.
    extendee_proto.ClearExtension(extension)
    self.assertEqual(0, extendee_proto.Extensions[extension])
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)

  def testTopLevelExtensionsForRepeatedScalar(self):
    """Repeated scalar extensions behave like lists; Clear disconnects them."""
    extendee_proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.repeated_string_extension
    self.assertEqual(0, len(extendee_proto.Extensions[extension]))
    self.assertNotIn(extension, extendee_proto.Extensions)
    extendee_proto.Extensions[extension].append('foo')
    self.assertEqual(['foo'], extendee_proto.Extensions[extension])
    self.assertIn(extension, extendee_proto.Extensions)
    string_list = extendee_proto.Extensions[extension]
    extendee_proto.ClearExtension(extension)
    self.assertEqual(0, len(extendee_proto.Extensions[extension]))
    self.assertNotIn(extension, extendee_proto.Extensions)
    # After clearing, the old container reference is detached.
    self.assertIsNot(string_list, extendee_proto.Extensions[extension])
    # Shouldn't be allowed to do Extensions[extension] = 'a'
    self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
                      extension, 'a')

  def testTopLevelExtensionsForOptionalMessage(self):
    """Message extensions: presence semantics and detach-on-clear."""
    extendee_proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.optional_foreign_message_extension
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    self.assertEqual(0, extendee_proto.Extensions[extension].c)
    # As with normal (non-extension) fields, merely reading from the
    # thing shouldn't set the "has" bit.
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    extendee_proto.Extensions[extension].c = 23
    self.assertEqual(23, extendee_proto.Extensions[extension].c)
    self.assertTrue(extendee_proto.HasExtension(extension))
    self.assertIn(extension, extendee_proto.Extensions)
    # Save a reference here.
    foreign_message = extendee_proto.Extensions[extension]
    extendee_proto.ClearExtension(extension)
    self.assertIsNot(foreign_message, extendee_proto.Extensions[extension])
    # Setting a field on foreign_message now shouldn't set
    # any "has" bits on extendee_proto.
    foreign_message.c = 42
    self.assertEqual(42, foreign_message.c)
    self.assertTrue(foreign_message.HasField('c'))
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    # Shouldn't be allowed to do Extensions[extension] = 'a'
    self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
                      extension, 'a')

  def testTopLevelExtensionsForRepeatedMessage(self):
    """Repeated group extensions support add(), mutation, and detach-on-clear."""
    extendee_proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.repeatedgroup_extension
    self.assertEqual(0, len(extendee_proto.Extensions[extension]))
    group = extendee_proto.Extensions[extension].add()
    group.a = 23
    self.assertEqual(23, extendee_proto.Extensions[extension][0].a)
    group.a = 42
    self.assertEqual(42, extendee_proto.Extensions[extension][0].a)
    group_list = extendee_proto.Extensions[extension]
    extendee_proto.ClearExtension(extension)
    self.assertEqual(0, len(extendee_proto.Extensions[extension]))
    self.assertIsNot(group_list, extendee_proto.Extensions[extension])
    # Shouldn't be allowed to do Extensions[extension] = 'a'
    self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
                      extension, 'a')
  def testNestedExtensions(self):
    """Extensions declared inside another message type work like top-level."""
    extendee_proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.single

    # We just test the non-repeated case.
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    required = extendee_proto.Extensions[extension]
    self.assertEqual(0, required.a)
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)
    required.a = 23
    self.assertEqual(23, extendee_proto.Extensions[extension].a)
    self.assertTrue(extendee_proto.HasExtension(extension))
    self.assertIn(extension, extendee_proto.Extensions)

    extendee_proto.ClearExtension(extension)
    self.assertIsNot(required, extendee_proto.Extensions[extension])
    self.assertFalse(extendee_proto.HasExtension(extension))
    self.assertNotIn(extension, extendee_proto.Extensions)

  def testRegisteredExtensions(self):
    """The descriptor pool can look up extensions by number and full name."""
    pool = unittest_pb2.DESCRIPTOR.pool
    self.assertTrue(
        pool.FindExtensionByNumber(
            unittest_pb2.TestAllExtensions.DESCRIPTOR, 1))
    self.assertIs(
        pool.FindExtensionByName(
            'proto2_unittest.optional_int32_extension').containing_type,
        unittest_pb2.TestAllExtensions.DESCRIPTOR)
    # Make sure extensions haven't been registered into types that shouldn't
    # have any.
    self.assertEqual(0, len(
        pool.FindAllExtensions(unittest_pb2.TestAllTypes.DESCRIPTOR)))
  # If message A directly contains message B, and
  # a.HasField('b') is currently False, then mutating any
  # extension in B should change a.HasField('b') to True
  # (and so on up the object tree).
  def testHasBitsForAncestorsOfExtendedMessage(self):
    """Mutating an extension in a submessage sets the parent's has-bit."""
    # Optional scalar extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertFalse(toplevel.HasField('submessage'))
    self.assertEqual(0, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_int_extension])
    self.assertFalse(toplevel.HasField('submessage'))
    toplevel.submessage.Extensions[
        more_extensions_pb2.optional_int_extension] = 23
    self.assertEqual(23, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_int_extension])
    self.assertTrue(toplevel.HasField('submessage'))

    # Repeated scalar extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertFalse(toplevel.HasField('submessage'))
    self.assertEqual([], toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_int_extension])
    self.assertFalse(toplevel.HasField('submessage'))
    toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_int_extension].append(23)
    self.assertEqual([23], toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_int_extension])
    self.assertTrue(toplevel.HasField('submessage'))

    # Optional message extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertFalse(toplevel.HasField('submessage'))
    self.assertEqual(0, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_message_extension].foreign_message_int)
    self.assertFalse(toplevel.HasField('submessage'))
    toplevel.submessage.Extensions[
        more_extensions_pb2.optional_message_extension].foreign_message_int = 23
    self.assertEqual(23, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_message_extension].foreign_message_int)
    self.assertTrue(toplevel.HasField('submessage'))

    # Repeated message extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertFalse(toplevel.HasField('submessage'))
    self.assertEqual(0, len(toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_message_extension]))
    self.assertFalse(toplevel.HasField('submessage'))
    foreign = toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_message_extension].add()
    self.assertEqual(foreign, toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_message_extension][0])
    self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
toplevel = more_extensions_pb2.TopLevelMessage()
extendee_proto = toplevel.submessage
extension = more_extensions_pb2.optional_message_extension
extension_proto = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
extension_proto.foreign_message_int = 23
self.assertIsNot(extension_proto, extendee_proto.Extensions[extension])
  def testExtensionFailureModes(self):
    """Invalid or foreign extension handles raise KeyError everywhere."""
    extendee_proto = unittest_pb2.TestAllExtensions()

    # Try non-extension-handle arguments to HasExtension,
    # ClearExtension(), and Extensions[]...
    self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
    self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
    self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
    self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)

    # Try something that *is* an extension handle, just not for
    # this message...
    for unknown_handle in (more_extensions_pb2.optional_int_extension,
                           more_extensions_pb2.optional_message_extension,
                           more_extensions_pb2.repeated_int_extension,
                           more_extensions_pb2.repeated_message_extension):
      self.assertRaises(KeyError, extendee_proto.HasExtension,
                        unknown_handle)
      self.assertRaises(KeyError, extendee_proto.ClearExtension,
                        unknown_handle)
      self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
                        unknown_handle)
      self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
                        unknown_handle, 5)

    # Try call HasExtension() with a valid handle, but for a
    # *repeated* field. (Just as with non-extension repeated
    # fields, Has*() isn't supported for extension repeated fields).
    self.assertRaises(KeyError, extendee_proto.HasExtension,
                      unittest_pb2.repeated_string_extension)
def testMergeFromOptionalGroup(self):
# Test merge with an optional group.
proto1 = unittest_pb2.TestAllTypes()
proto1.optionalgroup.a = 12
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
self.assertEqual(12, proto2.optionalgroup.a)
  def testMergeFromExtensionsSingular(self):
    """MergeFrom copies a singular extension value."""
    proto1 = unittest_pb2.TestAllExtensions()
    proto1.Extensions[unittest_pb2.optional_int32_extension] = 1

    proto2 = unittest_pb2.TestAllExtensions()
    proto2.MergeFrom(proto1)
    self.assertEqual(
        1, proto2.Extensions[unittest_pb2.optional_int32_extension])

  def testMergeFromExtensionsRepeated(self):
    """MergeFrom appends repeated extension values after existing ones."""
    proto1 = unittest_pb2.TestAllExtensions()
    proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
    proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)

    proto2 = unittest_pb2.TestAllExtensions()
    proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
    proto2.MergeFrom(proto1)
    self.assertEqual(
        3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
    self.assertEqual(
        0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
    self.assertEqual(
        1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
    self.assertEqual(
        2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])

  def testMergeFromExtensionsNestedMessage(self):
    """MergeFrom appends repeated message-extension entries after existing ones."""
    proto1 = unittest_pb2.TestAllExtensions()
    ext1 = proto1.Extensions[
        unittest_pb2.repeated_nested_message_extension]
    m = ext1.add()
    m.bb = 222
    m = ext1.add()
    m.bb = 333

    proto2 = unittest_pb2.TestAllExtensions()
    ext2 = proto2.Extensions[
        unittest_pb2.repeated_nested_message_extension]
    m = ext2.add()
    m.bb = 111

    proto2.MergeFrom(proto1)
    ext2 = proto2.Extensions[
        unittest_pb2.repeated_nested_message_extension]
    self.assertEqual(3, len(ext2))
    self.assertEqual(111, ext2[0].bb)
    self.assertEqual(222, ext2[1].bb)
    self.assertEqual(333, ext2[2].bb)

  def testRepeatedExtensionsIn(self):
    """A repeated extension becomes `in` Extensions once it has elements."""
    msg = more_extensions_pb2.ExtendedMessage()
    ext = more_extensions_pb2.repeated_int_extension
    self.assertNotIn(ext, msg.Extensions)
    msg.Extensions[ext].append(123)
    self.assertIn(ext, msg.Extensions)
def testCopyFromBadType(self):
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllExtensions()
self.assertRaises(TypeError, proto1.CopyFrom, proto2)
  def testClear(self):
    """Clear() resets all fields and extensions to an empty message."""
    proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(proto)
    # Clear the message.
    proto.Clear()
    self.assertEqual(proto.ByteSize(), 0)
    empty_proto = unittest_pb2.TestAllTypes()
    self.assertEqual(proto, empty_proto)

    # Test if extensions which were set are cleared.
    proto = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(proto)
    # Clear the message.
    proto.Clear()
    self.assertEqual(proto.ByteSize(), 0)
    empty_proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(proto, empty_proto)

  def testDisconnectingInOneof(self):
    """Setting one oneof member detaches (but keeps valid) the other's submessage."""
    m = unittest_pb2.TestOneof2()  # This message has two messages in a oneof.
    m.foo_message.moo_int = 5
    sub_message = m.foo_message
    # Accessing another message's field does not clear the first one
    self.assertEqual(m.foo_lazy_message.moo_int, 0)
    self.assertEqual(m.foo_message.moo_int, 5)
    # But mutating another message in the oneof detaches the first one.
    m.foo_lazy_message.moo_int = 6
    self.assertEqual(m.foo_message.moo_int, 0)
    # The reference we got above was detached and is still valid.
    self.assertEqual(sub_message.moo_int, 5)
    sub_message.moo_int = 7
  def assertInitialized(self, proto):
    """Assert that `proto` is initialized and serializes without error."""
    self.assertTrue(proto.IsInitialized())
    # Neither method should raise an exception.
    proto.SerializeToString()
    proto.SerializePartialToString()

  def assertNotInitialized(self, proto, error_size=None):
    """Assert `proto` is uninitialized with `error_size` missing required fields."""
    errors = []
    self.assertFalse(proto.IsInitialized())
    self.assertFalse(proto.IsInitialized(errors))
    self.assertEqual(error_size, len(errors))
    self.assertRaises(message.EncodeError, proto.SerializeToString)
    # "Partial" serialization doesn't care if message is uninitialized.
    proto.SerializePartialToString()
  def testIsInitialized(self):
    """IsInitialized tracks required fields in messages, submessages and extensions."""
    # Trivial cases - all optional fields and extensions.
    proto = unittest_pb2.TestAllTypes()
    self.assertInitialized(proto)
    proto = unittest_pb2.TestAllExtensions()
    self.assertInitialized(proto)

    # The case of uninitialized required fields.
    proto = unittest_pb2.TestRequired()
    self.assertNotInitialized(proto, 3)
    proto.a = proto.b = proto.c = 2
    self.assertInitialized(proto)

    # The case of uninitialized submessage.
    proto = unittest_pb2.TestRequiredForeign()
    self.assertInitialized(proto)
    proto.optional_message.a = 1
    self.assertNotInitialized(proto, 2)
    proto.optional_message.b = 0
    proto.optional_message.c = 0
    self.assertInitialized(proto)

    # Uninitialized repeated submessage.
    message1 = proto.repeated_message.add()
    self.assertNotInitialized(proto, 3)
    message1.a = message1.b = message1.c = 0
    self.assertInitialized(proto)

    # Uninitialized repeated group in an extension.
    proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.multi
    message1 = proto.Extensions[extension].add()
    message2 = proto.Extensions[extension].add()
    self.assertNotInitialized(proto, 6)
    message1.a = 1
    message1.b = 1
    message1.c = 1
    self.assertNotInitialized(proto, 3)
    message2.a = 2
    message2.b = 2
    message2.c = 2
    self.assertInitialized(proto)

    # Uninitialized nonrepeated message in an extension.
    proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.single
    proto.Extensions[extension].a = 1
    self.assertNotInitialized(proto, 2)
    proto.Extensions[extension].b = 2
    proto.Extensions[extension].c = 3
    self.assertInitialized(proto)

    # Try passing an errors list.
    errors = []
    proto = unittest_pb2.TestRequired()
    self.assertFalse(proto.IsInitialized(errors))
    self.assertEqual(errors, ['a', 'b', 'c'])
    # IsInitialized accepts at most one positional argument.
    self.assertRaises(TypeError, proto.IsInitialized, 1, 2, 3)
  @unittest.skipIf(
      api_implementation.Type() == 'python',
      'Errors are only available from the most recent C++ implementation.')
  def testFileDescriptorErrors(self):
    """Registering a file with duplicate symbols raises a descriptive TypeError."""
    file_name = 'test_file_descriptor_errors.proto'
    package_name = 'test_file_descriptor_errors.proto'
    file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
    file_descriptor_proto.name = file_name
    file_descriptor_proto.package = package_name
    m1 = file_descriptor_proto.message_type.add()
    m1.name = 'msg1'
    # Compiles the proto into the C++ descriptor pool
    descriptor.FileDescriptor(
        file_name,
        package_name,
        serialized_pb=file_descriptor_proto.SerializeToString())
    # Add a FileDescriptorProto that has duplicate symbols
    another_file_name = 'another_test_file_descriptor_errors.proto'
    file_descriptor_proto.name = another_file_name
    m2 = file_descriptor_proto.message_type.add()
    m2.name = 'msg2'
    with self.assertRaises(TypeError) as cm:
      descriptor.FileDescriptor(
          another_file_name,
          package_name,
          serialized_pb=file_descriptor_proto.SerializeToString())
    self.assertTrue(hasattr(cm, 'exception'), '%s not raised' %
                    getattr(cm.expected, '__name__', cm.expected))
    self.assertIn('test_file_descriptor_errors.proto', str(cm.exception))
    # Error message will say something about this definition being a
    # duplicate, though we don't check the message exactly to avoid a
    # dependency on the C++ logging code.
    self.assertIn('test_file_descriptor_errors.msg1', str(cm.exception))
  def testDescriptorProtoHasFileOptions(self):
    """descriptor.proto's own file options are visible via GetOptions()."""
    self.assertTrue(descriptor_pb2.DESCRIPTOR.has_options)
    self.assertEqual(
        descriptor_pb2.DESCRIPTOR.GetOptions().java_package,
        'com.google.protobuf',
    )

  def testDescriptorProtoHasFieldOptions(self):
    """Field-level options (e.g. packed) are exposed on field descriptors."""
    self.assertTrue(descriptor_pb2.DESCRIPTOR.has_options)
    self.assertEqual(
        descriptor_pb2.DESCRIPTOR.GetOptions().java_package,
        'com.google.protobuf',
    )
    packed_desc = (
        descriptor_pb2.SourceCodeInfo.DESCRIPTOR.nested_types_by_name.get(
            'Location'
        ).fields_by_name.get('path')
    )
    self.assertTrue(packed_desc.has_options)
    self.assertTrue(packed_desc.GetOptions().packed)

  def testDescriptorProtoHasFeatureOptions(self):
    """Editions feature options (retention, targets) are exposed on fields."""
    self.assertTrue(descriptor_pb2.DESCRIPTOR.has_options)
    self.assertEqual(
        descriptor_pb2.DESCRIPTOR.GetOptions().java_package,
        'com.google.protobuf',
    )
    presence_desc = descriptor_pb2.FeatureSet.DESCRIPTOR.fields_by_name.get(
        'field_presence'
    )
    self.assertTrue(presence_desc.has_options)
    self.assertEqual(
        presence_desc.GetOptions().retention,
        descriptor_pb2.FieldOptions.OptionRetention.RETENTION_RUNTIME,
    )
    self.assertListsEqual(
        presence_desc.GetOptions().targets,
        [
            descriptor_pb2.FieldOptions.OptionTargetType.TARGET_TYPE_FIELD,
            descriptor_pb2.FieldOptions.OptionTargetType.TARGET_TYPE_FILE,
        ],
    )
def testStringUTF8Serialization(self):
proto = message_set_extensions_pb2.TestMessageSet()
extension_message = message_set_extensions_pb2.TestMessageSetExtension2
extension = extension_message.message_set_extension
test_utf8 = u'Тест'
test_utf8_bytes = test_utf8.encode('utf-8')
# 'Test' in another language, using UTF-8 charset.
proto.Extensions[extension].str = test_utf8
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
# Check byte size.
self.assertEqual(proto.ByteSize(), len(serialized))
raw = unittest_mset_pb2.RawMessageSet()
bytes_read = raw.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_read)
message2 = message_set_extensions_pb2.TestMessageSetExtension2()
self.assertEqual(1, len(raw.item))
# Check that the type_id is the same as the tag ID in the .proto file.
self.assertEqual(raw.item[0].type_id, 98418634)
# Check the actual bytes on the wire.
self.assertTrue(raw.item[0].message.endswith(test_utf8_bytes))
bytes_read = message2.MergeFromString(raw.item[0].message)
self.assertEqual(len(raw.item[0].message), bytes_read)
self.assertEqual(type(message2.str), str)
self.assertEqual(message2.str, test_utf8)
# The pure Python API throws an exception on MergeFromString(),
# if any of the string fields of the message can't be UTF-8 decoded.
# The C++ implementation of the API has no way to check that on
# MergeFromString and thus has no way to throw the exception.
#
# The pure Python API always returns objects of type 'unicode' (UTF-8
# encoded), or 'bytes' (in 7 bit ASCII).
badbytes = raw.item[0].message.replace(
test_utf8_bytes, len(test_utf8_bytes) * b'\xff')
unicode_decode_failed = False
try:
message2.MergeFromString(badbytes)
except UnicodeDecodeError:
unicode_decode_failed = True
string_field = message2.str
self.assertTrue(unicode_decode_failed or type(string_field) is bytes)
def testSetInParent(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optionalgroup'))
proto.optionalgroup.SetInParent()
self.assertTrue(proto.HasField('optionalgroup'))
def testPackageInitializationImport(self):
"""Test that we can import nested messages from their __init__.py.
Such setup is not trivial since at the time of processing of __init__.py one
can't refer to its submodules by name in code, so expressions like
google.protobuf.internal.import_test_package.inner_pb2
don't work. They do work in imports, so we have assign an alias at import
and then use that alias in generated code.
"""
# We import here since it's the import that used to fail, and we want
# the failure to have the right context.
# pylint: disable=g-import-not-at-top
from google.protobuf.internal import import_test_package
# pylint: enable=g-import-not-at-top
msg = import_test_package.myproto.Outer()
# Just check the default value.
self.assertEqual(57, msg.inner.value)
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
@testing_refleaks.TestCase
| Proto2ReflectionTest |
python | scrapy__scrapy | tests/test_request_dict.py | {
"start": 6328,
"end": 6577
} | class ____:
def delegated_callback(self, response):
pass
def parse_item(response):
pass
def handle_error(failure):
pass
def private_parse_item(response):
pass
def private_handle_error(failure):
pass
| SpiderDelegation |
python | ray-project__ray | python/ray/serve/metrics.py | {
"start": 5108,
"end": 7063
} | class ____(metrics.Gauge):
"""Gauges keep the last recorded value and drop everything before.
This corresponds to Prometheus' gauge metric:
https://prometheus.io/docs/concepts/metric_types/#gauge
Serve-related tags ("deployment", "replica", "application", "route")
are added automatically if not provided.
.. code-block:: python
@serve.deployment
class MyDeployment:
def __init__(self):
self.num_requests = 0
self.my_gauge = metrics.Gauge(
"my_gauge",
description=("The current memory usage."),
tag_keys=("model",),
)
self.my_counter.set_default_tags({"model": "123"})
def __call__(self):
process = psutil.Process()
self.gauge.set(process.memory_info().rss)
Args:
name: Name of the metric.
description: Description of the metric.
tag_keys: Tag keys of the metric.
"""
def __init__(
self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None
):
if tag_keys and not isinstance(tag_keys, tuple):
raise TypeError(
"tag_keys should be a tuple type, got: " f"{type(tag_keys)}"
)
tag_keys = _add_serve_metric_tags(tag_keys)
super().__init__(name, description, tag_keys)
self.set_default_tags({})
def set_default_tags(self, default_tags: Dict[str, str]):
super().set_default_tags(_add_serve_metric_default_tags(default_tags))
def set(self, value: Union[int, float], tags: Dict[str, str] = None):
"""Set the gauge to the given value, add serve context
tag values to the tags
"""
_add_serve_context_tag_values(self._tag_keys, tags)
super().set(value, tags)
@PublicAPI(stability="beta")
| Gauge |
python | django__django | django/urls/converters.py | {
"start": 558,
"end": 627
} | class ____(StringConverter):
regex = "[-a-zA-Z0-9_]+"
| SlugConverter |
python | huggingface__transformers | src/transformers/models/siglip2/modeling_siglip2.py | {
"start": 13401,
"end": 15545
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Union[Siglip2VisionConfig, Siglip2TextConfig]):
super().__init__()
self.embed_dim = config.hidden_size
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.self_attn = Siglip2Attention(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = Siglip2MLP(config)
@auto_docstring
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
def variance_scaling_(tensor, mode="fan_in", distribution="normal"):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == "fan_in":
denom = fan_in
elif mode == "fan_out":
denom = fan_out
elif mode == "fan_avg":
denom = (fan_in + fan_out) / 2
variance = 1.0 / denom
if distribution == "truncated_normal":
init.trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
elif distribution == "normal":
init.normal_(tensor, std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
init.uniform_(tensor, -bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
def default_flax_embed_init(tensor):
variance_scaling_(tensor, mode="fan_in", distribution="normal")
@auto_docstring
| Siglip2EncoderLayer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/feature_store.py | {
"start": 24457,
"end": 28097
} | class ____(GoogleCloudBaseOperator, OperationHelper):
"""
Delete the Feature Online store.
This method initiates VertexAI Feature Online Store deletion request.
There should be no FeatureViews to be deleted successfully.
:param project_id: Required. The ID of the Google Cloud project that contains the feature store.
This is used to identify which project's resources to interact with.
:param location: Required. The location of the feature store (e.g., 'us-central1', 'us-east1').
This specifies the Google Cloud region where the feature store resources are located.
:param feature_online_store_id: Required. The ID of the online feature store that contains
the feature view to be synchronized. This store serves as the online serving layer.
:param force: If set to true, any FeatureViews and Features for this FeatureOnlineStore
will also be deleted.
:param gcp_conn_id: The connection ID to use for connecting to Google Cloud Platform.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials. Can be either a single account or a chain of accounts required to
get the access_token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role. If set as a sequence, the identities
from the list must grant Service Account Token Creator IAM role to the directly
preceding identity, with first account from the list granting this role to the
originating account.
"""
template_fields: Sequence[str] = (
"project_id",
"location",
"feature_online_store_id",
)
def __init__(
self,
*,
project_id: str,
location: str,
feature_online_store_id: str,
force: bool = False,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.feature_online_store_id = feature_online_store_id
self.force = force
self.timeout = timeout
self.retry = retry
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict[str, Any]:
"""Execute the get feature view sync operation."""
hook = FeatureStoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting the Feature Online Store...")
result_operation = hook.delete_feature_online_store(
project_id=self.project_id,
location=self.location,
feature_online_store_id=self.feature_online_store_id,
force=self.force,
timeout=self.timeout,
retry=self.retry,
metadata=self.metadata,
)
self.wait_for_operation_result(operation=result_operation)
self.log.info("The Feature Online Store deletion has been complete: %s", self.feature_online_store_id)
return {"result": f"The {self.feature_online_store_id} has been deleted."}
| DeleteFeatureOnlineStoreOperator |
python | huggingface__transformers | src/transformers/models/donut/modeling_donut_swin.py | {
"start": 22600,
"end": 23111
} | class ____(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin
| DonutSwinOutput |
python | huggingface__transformers | src/transformers/models/olmoe/configuration_olmoe.py | {
"start": 710,
"end": 8694
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`OlmoeModel`]. It is used to instantiate an OLMoE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/OLMoE-1B-7B-0924](https://huggingface.co/allenai/OLMoE-1B-7B-0924).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the OLMoE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`OlmoeModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 16):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
clip_qkv (`float`, *optional*):
If not `None`, elements of query, key and value attention states are clipped so that their
absolute value does not exceed this value.
num_experts_per_tok (`int`, *optional*, defaults to 8):
Number of selected experts.
num_experts (`int`, *optional*, defaults to 64):
Number of routed experts.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
The aux loss factor for the total loss.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the topk probabilities.
```python
>>> from transformers import OlmoeModel, OlmoeConfig
>>> # Initializing a OLMoE 7B A1B style configuration
>>> configuration = OlmoeConfig()
>>> # Initializing a model from the OLMoE 7B A1B style configuration
>>> model = OlmoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "olmoe"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_local_experts": "num_experts"}
def __init__(
self,
vocab_size: Optional[int] = 50304,
hidden_size: Optional[int] = 2048,
intermediate_size: Optional[int] = 2048,
num_hidden_layers: Optional[int] = 16,
num_attention_heads: Optional[int] = 16,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 4096,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-05,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = None,
eos_token_id: Optional[int] = 50279,
tie_word_embeddings: Optional[int] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
clip_qkv: Optional[bool] = None,
num_experts_per_tok: Optional[int] = 8,
num_experts: Optional[int] = 64,
output_router_logits: Optional[bool] = False,
router_aux_loss_coef: Optional[float] = 0.01,
norm_topk_prob: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.clip_qkv = clip_qkv
self.num_experts_per_tok = num_experts_per_tok
self.num_experts = num_experts
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.norm_topk_prob = norm_topk_prob
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["OlmoeConfig"]
| OlmoeConfig |
python | wandb__wandb | tests/system_tests/test_importers/test_mlflow/conftest.py | {
"start": 662,
"end": 1394
} | class ____:
metrics_backend: Literal[
"mssql_backend",
"mysql_backend",
"postgres_backend",
"file_backend",
"sqlite_backend",
]
artifacts_backend: Literal["file_artifacts", "s3_artifacts"]
base_url: str = "http://localhost:4040"
health_endpoint: str = "health"
# helper if port is blocked
new_port: Optional[str] = None
def __post_init__(self):
self.new_port = self._get_free_port()
self.base_url = self.base_url.replace("4040", self.new_port)
@staticmethod
def _get_free_port():
import socket
sock = socket.socket()
sock.bind(("", 0))
return str(sock.getsockname()[1])
@dataclass
| MlflowServerSettings |
python | getsentry__sentry | tests/sentry/uptime/subscriptions/test_subscriptions.py | {
"start": 5543,
"end": 7033
} | class ____(UptimeTestCase):
def test(self) -> None:
with self.tasks():
uptime_sub = create_uptime_subscription("https://sentry.io", 300, 500)
uptime_sub.refresh_from_db()
prev_subscription_id = uptime_sub.subscription_id
url = "https://santry.io"
interval_seconds = 600
timeout_ms = 1000
method = "POST"
body = "some body"
trace_sampling = True
with self.tasks():
update_uptime_subscription(
uptime_sub,
url,
interval_seconds,
timeout_ms,
method,
[("something", "some_val")],
body=body,
trace_sampling=trace_sampling,
)
uptime_sub.refresh_from_db()
assert uptime_sub.subscription_id == prev_subscription_id
assert uptime_sub.status == UptimeSubscription.Status.ACTIVE.value
assert uptime_sub.type == UPTIME_SUBSCRIPTION_TYPE
assert uptime_sub.url == url
assert uptime_sub.url_domain == "santry"
assert uptime_sub.url_domain_suffix == "io"
assert uptime_sub.interval_seconds == interval_seconds
assert uptime_sub.timeout_ms == timeout_ms
assert uptime_sub.method == method
assert uptime_sub.headers == [["something", "some_val"]]
assert uptime_sub.body == body
assert uptime_sub.trace_sampling == trace_sampling
| UpdateUptimeSubscriptionTest |
python | django__django | tests/transactions/tests.py | {
"start": 22452,
"end": 23388
} | class ____(TransactionTestCase):
available_apps = []
def setUp(self):
transaction.set_autocommit(False)
self.addCleanup(transaction.set_autocommit, True)
self.addCleanup(transaction.rollback)
def test_orm_query_after_error_and_rollback(self):
"""
ORM queries are allowed after an error and a rollback in non-autocommit
mode (#27504).
"""
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
transaction.rollback()
Reporter.objects.last()
def test_orm_query_without_autocommit(self):
"""
#24921 -- ORM queries must be possible after set_autocommit(False).
"""
Reporter.objects.create(first_name="Tintin")
| NonAutocommitTests |
python | python-excel__xlwt | xlwt/Formatting.py | {
"start": 8310,
"end": 8543
} | class ____(object):
def __init__(self):
self.cell_locked = 1
self.formula_hidden = 0
def _search_key(self):
return (
self.cell_locked,
self.formula_hidden,
)
| Protection |
python | sympy__sympy | sympy/ntheory/elliptic_curve.py | {
"start": 7990,
"end": 11623
} | class ____:
"""
Point of Elliptic Curve
Examples
========
>>> from sympy.ntheory.elliptic_curve import EllipticCurve
>>> e1 = EllipticCurve(-17, 16)
>>> p1 = e1(0, -4, 1)
>>> p2 = e1(1, 0)
>>> p1 + p2
(15, -56)
>>> e3 = EllipticCurve(-1, 9)
>>> e3(1, -3) * 3
(664/169, 17811/2197)
>>> (e3(1, -3) * 3).order()
oo
>>> e2 = EllipticCurve(-2, 0, 0, 1, 1)
>>> p = e2(-1,1)
>>> q = e2(0, -1)
>>> p+q
(4, 8)
>>> p-q
(1, 0)
>>> 3*p-5*q
(328/361, -2800/6859)
"""
@staticmethod
def point_at_infinity(curve):
return EllipticCurvePoint(0, 1, 0, curve)
def __init__(self, x, y, z, curve):
dom = curve._domain.convert
self.x = dom(x)
self.y = dom(y)
self.z = dom(z)
self._curve = curve
self._domain = self._curve._domain
if not self._curve.__contains__(self):
raise ValueError("The curve does not contain this point")
def __add__(self, p):
if self.z == 0:
return p
if p.z == 0:
return self
x1, y1 = self.x/self.z, self.y/self.z
x2, y2 = p.x/p.z, p.y/p.z
a1 = self._curve._a1
a2 = self._curve._a2
a3 = self._curve._a3
a4 = self._curve._a4
a6 = self._curve._a6
if x1 != x2:
slope = (y1 - y2) / (x1 - x2)
yint = (y1 * x2 - y2 * x1) / (x2 - x1)
else:
if (y1 + y2) == 0:
return self.point_at_infinity(self._curve)
slope = (3 * x1**2 + 2*a2*x1 + a4 - a1*y1) / (a1 * x1 + a3 + 2 * y1)
yint = (-x1**3 + a4*x1 + 2*a6 - a3*y1) / (a1*x1 + a3 + 2*y1)
x3 = slope**2 + a1*slope - a2 - x1 - x2
y3 = -(slope + a1) * x3 - yint - a3
return self._curve(x3, y3, 1)
def __lt__(self, other):
return (self.x, self.y, self.z) < (other.x, other.y, other.z)
def __mul__(self, n):
n = as_int(n)
r = self.point_at_infinity(self._curve)
if n == 0:
return r
if n < 0:
return -self * -n
p = self
while n:
if n & 1:
r = r + p
n >>= 1
p = p + p
return r
def __rmul__(self, n):
return self * n
def __neg__(self):
if self.z == 0:
return self.point_at_infinity(self._curve)
return EllipticCurvePoint(self.x, -self.y - self._curve._a1*self.x - self._curve._a3, self.z, self._curve)
def __repr__(self):
if self.z == 0:
return 'O'
dom = self._curve._domain
try:
return '({}, {})'.format(dom.to_sympy(self.x), dom.to_sympy(self.y))
except TypeError:
pass
return '({}, {})'.format(self.x, self.y)
def __sub__(self, other):
return self.__add__(-other)
def order(self):
"""
Return point order n where nP = 0.
"""
if self.z == 0:
return 1
if self.y == 0: # P = -P
return 2
p = self * 2
if p.y == -self.y: # 2P = -P
return 3
i = 2
if self._domain != QQ:
while int(p.x) == p.x and int(p.y) == p.y:
p = self + p
i += 1
if p.z == 0:
return i
return oo
while p.x.numerator == p.x and p.y.numerator == p.y:
p = self + p
i += 1
if i > 12:
return oo
if p.z == 0:
return i
return oo
| EllipticCurvePoint |
python | pypa__pip | src/pip/_vendor/rich/emoji.py | {
"start": 361,
"end": 422
} | class ____(Exception):
"""No emoji by that name."""
| NoEmoji |
python | getsentry__sentry | src/sentry/workflow_engine/models/json_config.py | {
"start": 185,
"end": 529
} | class ____(models.Model):
config = models.JSONField(db_default={})
def validate_config(self, schema: dict[str, Any]) -> None:
try:
validate(self.config, schema)
except ValidationError as e:
raise ValidationError(f"Invalid config: {e.message}")
class Meta:
abstract = True
| JSONConfigBase |
python | openai__openai-python | src/openai/types/eval_create_response.py | {
"start": 2097,
"end": 2256
} | class ____(PythonGrader):
__test__ = False
pass_threshold: Optional[float] = None
"""The threshold for the score."""
| TestingCriterionEvalGraderPython |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 981,
"end": 1038
} | class ____(Base):
y = attr.ib()
@attr.s(slots=True)
| Sub |
python | kamyu104__LeetCode-Solutions | Python/minimum-adjacent-swaps-for-k-consecutive-ones.py | {
"start": 29,
"end": 729
} | class ____(object):
def minMoves(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def moves(i, j):
return prefix[j+1]-prefix[i]
idxs = [i for i, x in enumerate(nums) if x]
prefix = [0]*(len(idxs)+1)
for i in xrange(len(idxs)):
prefix[i+1] = prefix[i]+idxs[i]
result = float("inf")
for i in xrange(len(idxs)-k+1):
result = min(result, -moves(i, i+k//2-1) + moves(i+(k+1)//2, i+k-1)) # take each i+k//2 as median, find min dist to median
result -= (k//2)*((k+1)//2) # rollback extra moves to the expected positions
return result
| Solution |
python | getsentry__sentry | src/sentry/api/endpoints/organization_sampling_effective_sample_rate.py | {
"start": 879,
"end": 1007
} | class ____(TypedDict):
effectiveSampleRate: float | None
@region_silo_endpoint
| OrganizationSamplingEffectiveSampleRateResponse |
python | pytorch__pytorch | test/distributed/elastic/utils/util_test.py | {
"start": 7796,
"end": 8415
} | class ____(TestCase):
def test_get_logger_different(self):
logger1 = get_logger("name1")
logger2 = get_logger("name2")
self.assertNotEqual(logger1.name, logger2.name)
def test_get_logger(self):
logger1 = get_logger()
self.assertEqual(__name__, logger1.name)
def test_get_logger_none(self):
logger1 = get_logger(None)
self.assertEqual(__name__, logger1.name)
def test_get_logger_custom_name(self):
logger1 = get_logger("test.module")
self.assertEqual("test.module", logger1.name)
if __name__ == "__main__":
run_tests()
| UtilTest |
python | bokeh__bokeh | src/bokeh/models/scales.py | {
"start": 2939,
"end": 3207
} | class ____(ContinuousScale):
''' Represent a log scale transformation between continuous ranges.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| LogScale |
python | django__django | tests/admin_views/tests.py | {
"start": 193601,
"end": 200519
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.per1 = Person.objects.create(name="John Mauchly", gender=1, alive=True)
cls.per2 = Person.objects.create(name="Grace Hopper", gender=1, alive=False)
cls.per3 = Person.objects.create(name="Guido van Rossum", gender=1, alive=True)
Person.objects.create(name="John Doe", gender=1)
Person.objects.create(name='John O"Hara', gender=1)
Person.objects.create(name="John O'Hara", gender=1)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(the_recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(the_recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text="Bar")
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text="Foo")
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text="Few")
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text="Bas")
def setUp(self):
self.client.force_login(self.superuser)
def test_search_on_sibling_models(self):
"A search that mentions sibling models"
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
The to_field GET parameter is preserved when a search is performed.
Refs #10918.
"""
response = self.client.get(
reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR
)
self.assertContains(response, "\n1 user\n")
self.assertContains(
response,
'<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR,
html=True,
)
def test_exact_matches(self):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=ba"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=uido"
)
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=Bob"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(
reverse("admin:admin_views_pluggablesearchperson_changelist") + "?q=20"
)
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get(
reverse("admin:admin_views_person_changelist") + "?q=Gui"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">6 total</a>)</span>""",
html=True,
)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get(
reverse("admin:admin_views_recommendation_changelist") + "?q=bar"
)
self.assertContains(
response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True,
)
self.assertTrue(response.context["cl"].show_admin_actions)
def test_search_with_spaces(self):
url = reverse("admin:admin_views_person_changelist") + "?q=%s"
tests = [
('"John Doe"', 1),
("'John Doe'", 1),
("John Doe", 0),
('"John Doe" John', 1),
("'John Doe' John", 1),
("John Doe John", 0),
('"John Do"', 1),
("'John Do'", 1),
("'John O'Hara'", 0),
("'John O\\'Hara'", 1),
('"John O"Hara"', 0),
('"John O\\"Hara"', 1),
]
for search, hits in tests:
with self.subTest(search=search):
response = self.client.get(url % search)
self.assertContains(response, "\n%s person" % hits)
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminSearchTest |
python | readthedocs__readthedocs.org | readthedocs/proxito/tests/test_custom_path_prefixes.py | {
"start": 376,
"end": 23842
} | class ____(BaseDocServing):
def test_custom_prefix_multi_version_project(self):
self.project.custom_prefix = "/custom/prefix/"
self.project.save()
host = "project.readthedocs.io"
# Root redirect.
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom/prefix/en/latest/"
)
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Trailing slash redirect
resp = self.client.get("/custom/prefix/en/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom/prefix/en/latest/"
)
resp = self.client.get("/custom/prefix/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/index.html",
)
resp = self.client.get(
"/custom/prefix/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/api/index.html",
)
def test_custom_prefix_multi_version_project_translation(self):
self.project.custom_prefix = "/custom/prefix/"
self.project.save()
host = "project.readthedocs.io"
resp = self.client.get("/es/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Trailing slash redirect
resp = self.client.get("/custom/prefix/es/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom/prefix/es/latest/"
)
resp = self.client.get("/custom/prefix/es/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/translation/latest/index.html",
)
resp = self.client.get(
"/custom/prefix/es/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/translation/latest/api/index.html",
)
def test_custom_prefix_single_version_project(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.custom_prefix = "/custom-prefix/"
self.project.save()
host = "project.readthedocs.io"
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom-prefix/"
)
# Trailing slash redirect
resp = self.client.get("/custom-prefix", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom-prefix/"
)
resp = self.client.get("/custom-prefix/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/index.html",
)
resp = self.client.get("/custom-prefix/api/index.html", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/api/index.html",
)
def test_custom_prefix_multiple_versions_without_translations_project(self):
self.project.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.project.custom_prefix = "/custom-prefix/"
self.project.save()
host = "project.readthedocs.io"
# Root redirect.
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom-prefix/latest/"
)
# Root prefix redirect.
resp = self.client.get("/custom-prefix/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom-prefix/latest/"
)
# Trailing slash redirect
resp = self.client.get("/custom-prefix/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom-prefix/latest/"
)
resp = self.client.get("/custom-prefix/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/index.html",
)
resp = self.client.get(
"/custom-prefix/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/api/index.html",
)
def test_custom_subproject_prefix(self):
self.project.custom_subproject_prefix = "/custom/"
self.project.save()
host = "project.readthedocs.io"
# Root redirect for the main project.
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp["Location"], "http://project.readthedocs.io/en/latest/")
# Serving works on the main project.
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# Subproject to main project redirect
resp = self.client.get("/", headers={"host": "subproject.readthedocs.io"})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/custom/subproject/"
)
resp = self.client.get(
"/en/latest/", headers={"host": "subproject.readthedocs.io"}
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/custom/subproject/en/latest/",
)
# Old paths
resp = self.client.get("/projects/subproject/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
resp = self.client.get("/projects/subproject/en/latest", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Root redirect for the subproject
resp = self.client.get("/custom/subproject", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/custom/subproject/en/latest/",
)
resp = self.client.get("/custom/subproject/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/custom/subproject/en/latest/",
)
# Trailing slash redirect
resp = self.client.get("/custom/subproject/en/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/custom/subproject/en/latest/",
)
# Normal serving
resp = self.client.get("/custom/subproject/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/index.html",
)
resp = self.client.get(
"/custom/subproject/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/api/index.html",
)
def test_custom_subproject_prefix_empty(self):
self.project.custom_subproject_prefix = "/"
self.project.save()
host = "project.readthedocs.io"
# Root redirect for the main project.
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp["Location"], "http://project.readthedocs.io/en/latest/")
# Serving works on the main project.
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# Subproject to main project redirect
resp = self.client.get("/", headers={"host": "subproject.readthedocs.io"})
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp["Location"], "http://project.readthedocs.io/subproject/")
resp = self.client.get(
"/en/latest/", headers={"host": "subproject.readthedocs.io"}
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/subproject/en/latest/"
)
# Root redirect for the subproject
resp = self.client.get("/subproject", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/subproject/en/latest/"
)
resp = self.client.get("/subproject/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/subproject/en/latest/"
)
# Trailing slash redirect
resp = self.client.get("/subproject/en/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/subproject/en/latest/"
)
# Normal serving
resp = self.client.get("/subproject/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/index.html",
)
resp = self.client.get(
"/subproject/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/api/index.html",
)
def test_custom_prefix_and_custom_subproject_prefix_in_superproject(self):
self.project.custom_prefix = "/prefix/"
self.project.custom_subproject_prefix = "/s/"
self.project.save()
host = "project.readthedocs.io"
# Root redirect for the main project.
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/prefix/en/latest/"
)
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Serving works on the main project.
resp = self.client.get("/prefix/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# Subproject to main project redirect
resp = self.client.get("/", headers={"host": "subproject.readthedocs.io"})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/"
)
resp = self.client.get(
"/en/latest/", headers={"host": "subproject.readthedocs.io"}
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/en/latest/"
)
# Root redirect for the subproject
resp = self.client.get("/s/subproject", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/en/latest/"
)
resp = self.client.get("/s/subproject/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/en/latest/"
)
# Trailing slash redirect
resp = self.client.get("/s/subproject/en/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/en/latest/"
)
# Normal serving
resp = self.client.get("/s/subproject/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/index.html",
)
resp = self.client.get(
"/s/subproject/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/api/index.html",
)
def test_custom_prefix_and_custom_subproject_prefix_with_translations(self):
self.project.custom_prefix = "/prefix/"
self.project.custom_subproject_prefix = "/s/"
self.project.save()
host = "project.readthedocs.io"
resp = self.client.get("/es/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Serving works on the main project.
resp = self.client.get("/prefix/es/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/translation/latest/index.html",
)
# Normal serving
resp = self.client.get("/s/subproject/es/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject-translation/latest/index.html",
)
resp = self.client.get(
"/s/subproject/es/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject-translation/latest/api/index.html",
)
def test_custom_prefix_in_subproject_and_custom_prefix_in_superproject(self):
self.subproject.custom_prefix = "/prefix/"
self.subproject.save()
self.project.custom_subproject_prefix = "/s/"
self.project.save()
host = "project.readthedocs.io"
# Root redirect for the main project.
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp["Location"], "http://project.readthedocs.io/en/latest/")
# Serving works on the main project.
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# Subproject to main project redirect
resp = self.client.get("/", headers={"host": "subproject.readthedocs.io"})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/"
)
resp = self.client.get(
"/en/latest/", headers={"host": "subproject.readthedocs.io"}
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/s/subproject/en/latest/"
)
# Root redirect for the subproject
resp = self.client.get("/s/subproject", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/s/subproject/prefix/en/latest/",
)
resp = self.client.get("/s/subproject/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/s/subproject/prefix/en/latest/",
)
# Trailing slash redirect
resp = self.client.get("/s/subproject/prefix/en/latest", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/s/subproject/prefix/en/latest/",
)
# Normal serving
resp = self.client.get(
"/s/subproject/prefix/en/latest/", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/index.html",
)
resp = self.client.get(
"/s/subproject/prefix/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/api/index.html",
)
def test_same_prefixes(self):
self.project.custom_prefix = "/prefix/"
self.project.custom_subproject_prefix = "/prefix/"
self.project.save()
host = "project.readthedocs.io"
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/prefix/en/latest/"
)
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Serving works on the main project.
resp = self.client.get("/prefix/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# Root redirect for the subproject
resp = self.client.get("/prefix/subproject", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/prefix/subproject/en/latest/",
)
# Normal serving
resp = self.client.get("/prefix/subproject/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/index.html",
)
resp = self.client.get(
"/prefix/subproject/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/api/index.html",
)
def test_valid_overlapping_prefixes(self):
self.project.custom_prefix = "/prefix/"
self.project.custom_subproject_prefix = "/prefix/s/"
self.project.save()
host = "project.readthedocs.io"
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/prefix/en/latest/"
)
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Serving works on the main project.
resp = self.client.get("/prefix/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# Root redirect for the subproject
resp = self.client.get("/prefix/s/subproject", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"],
"http://project.readthedocs.io/prefix/s/subproject/en/latest/",
)
# Normal serving
resp = self.client.get(
"/prefix/s/subproject/en/latest/", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/index.html",
)
resp = self.client.get(
"/prefix/s/subproject/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/api/index.html",
)
def test_invalid_overlapping_prefixes(self):
self.project.custom_prefix = "/prefix/"
self.project.custom_subproject_prefix = "/prefix/es/"
self.project.save()
host = "project.readthedocs.io"
resp = self.client.get("/", headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["Location"], "http://project.readthedocs.io/prefix/en/latest/"
)
resp = self.client.get("/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
# Serving works on the main project.
resp = self.client.get("/prefix/en/latest/", headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/index.html"
)
# We can't access to the subproject.
resp = self.client.get("/prefix/es/subproject/", headers={"host": host})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(
"/prefix/es/subproject/en/latest/", headers={"host": host}
)
self.assertEqual(resp.status_code, 404)
resp = self.client.get(
"/prefix/es/subproject/en/latest/api/index.html", headers={"host": host}
)
self.assertEqual(resp.status_code, 404)
| TestCustomPathPrefixes |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/security_manager/test_override.py | {
"start": 1003,
"end": 1220
} | class ____(FabAirflowSecurityManagerOverride):
# noinspection PyMissingConstructor
# super() not called on purpose to avoid the whole chain of init calls
def __init__(self):
pass
| EmptySecurityManager |
python | encode__django-rest-framework | rest_framework/exceptions.py | {
"start": 6879,
"end": 8159
} | class ____(APIException):
status_code = status.HTTP_429_TOO_MANY_REQUESTS
default_detail = _('Request was throttled.')
extra_detail_singular = _('Expected available in {wait} second.')
extra_detail_plural = _('Expected available in {wait} seconds.')
default_code = 'throttled'
def __init__(self, wait=None, detail=None, code=None):
if detail is None:
detail = force_str(self.default_detail)
if wait is not None:
wait = math.ceil(wait)
detail = ' '.join((
detail,
force_str(ngettext(self.extra_detail_singular.format(wait=wait),
self.extra_detail_plural.format(wait=wait),
wait))))
self.wait = wait
super().__init__(detail, code)
def server_error(request, *args, **kwargs):
"""
Generic 500 error handler.
"""
data = {
'error': 'Server Error (500)'
}
return JsonResponse(data, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def bad_request(request, exception, *args, **kwargs):
"""
Generic 400 error handler.
"""
data = {
'error': 'Bad Request (400)'
}
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
| Throttled |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | {
"start": 21380,
"end": 22903
} | class ____(PreTrainedModel):
config: Ernie4_5_MoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Ernie4_5_MoeDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(Ernie4_5_MoeTopKRouter, layer_name="mlp.gate", index=0),
"hidden_states": Ernie4_5_MoeDecoderLayer,
"attentions": Ernie4_5_MoeAttention,
}
# Not supporting multi-token prediction (MTP) atm
_keys_to_ignore_on_load_unexpected = ["mtp"]
_keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Ernie4_5_MoeStatics):
init.zeros_(module.e_score_correction_bias)
elif isinstance(module, Ernie4_5_MoeExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
if module.gate_up_proj_bias is not None:
init.zeros_(module.gate_up_proj_bias)
init.zeros_(module.down_proj_bias)
@auto_docstring
| Ernie4_5_MoePreTrainedModel |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 119267,
"end": 119915
} | class ____(IterableDataset):
def __init__(self, len, size):
super(RandomDataset).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
if __name__ == '__main__':
dl = DataLoader(
RandomDataset(64, (28, 28)),
batch_size=16,
num_workers=2,
pin_memory=True,
persistent_workers=True,
multiprocessing_context="fork",
)
for _ in dl:
break
""",
]
)
| RandomDataset |
python | huggingface__transformers | src/transformers/models/diffllama/modular_diffllama.py | {
"start": 19518,
"end": 19603
} | class ____(LlamaForSequenceClassification):
pass
| DiffLlamaForSequenceClassification |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 18261,
"end": 19241
} | class ____(MetaEstimatorMixin, ClassifierMixin, BaseEstimator):
"""A meta-estimator which also consumes sample_weight itself in ``fit``."""
def __init__(self, estimator, registry=None):
self.estimator = estimator
self.registry = registry
def fit(self, X, y, sample_weight=None, **kwargs):
if self.registry is not None:
self.registry.append(self)
record_metadata(self, sample_weight=sample_weight)
params = process_routing(self, "fit", sample_weight=sample_weight, **kwargs)
self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
return self
def get_metadata_routing(self):
router = (
MetadataRouter(owner=self)
.add_self_request(self)
.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
)
return router
| WeightedMetaClassifier |
python | sympy__sympy | sympy/functions/special/mathieu_functions.py | {
"start": 2028,
"end": 3499
} | class ____(MathieuBase):
r"""
The Mathieu Cosine function $C(a,q,z)$.
Explanation
===========
This function is one solution of the Mathieu differential equation:
.. math ::
y(x)^{\prime\prime} + (a - 2 q \cos(2 x)) y(x) = 0
The other solution is the Mathieu Sine function.
Examples
========
>>> from sympy import diff, mathieuc
>>> from sympy.abc import a, q, z
>>> mathieuc(a, q, z)
mathieuc(a, q, z)
>>> mathieuc(a, 0, z)
cos(sqrt(a)*z)
>>> diff(mathieuc(a, q, z), z)
mathieucprime(a, q, z)
See Also
========
mathieus: Mathieu sine function
mathieusprime: Derivative of Mathieu sine function
mathieucprime: Derivative of Mathieu cosine function
References
==========
.. [1] https://en.wikipedia.org/wiki/Mathieu_function
.. [2] https://dlmf.nist.gov/28
.. [3] https://mathworld.wolfram.com/MathieuFunction.html
.. [4] https://functions.wolfram.com/MathieuandSpheroidalFunctions/MathieuC/
"""
def fdiff(self, argindex=1):
if argindex == 3:
a, q, z = self.args
return mathieucprime(a, q, z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, q, z):
if q.is_Number and q.is_zero:
return cos(sqrt(a)*z)
# Try to pull out factors of -1
if z.could_extract_minus_sign():
return cls(a, q, -z)
| mathieuc |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/log_write.py | {
"start": 119,
"end": 489
} | class ____(App):
def compose(self) -> ComposeResult:
yield Log()
def on_ready(self) -> None:
log = self.query_one(Log)
log.write("Hello,")
log.write(" World")
log.write("!\nWhat's up?")
log.write("")
log.write("\n")
log.write("FOO")
if __name__ == "__main__":
app = LogApp()
app.run()
| LogApp |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/test_backend_util.py | {
"start": 990,
"end": 2557
} | class ____:
"""Configurate test backends."""
def __init__(self, test_case: tf_test.TestCase):
self._test_case = test_case
# TODO(b/260771689): Refactor common backend set up logic to here.
def tearDown(self):
# Only need to explicitly shuts down TPU system in TFRT since in current
# runtime, the shutdown is done in initialization process.
if accelerator_util.is_initialized():
accelerator_util.shutdown_accelerator_system()
def slice_host_devices_for_multiworker(num_clients, client_id, ports):
"""Configure the current process to only use a slice of devices."""
if num_clients == 0:
# All GPUs are visible to the client.
del os.environ['CUDA_VISIBLE_DEVICES']
del os.environ['HIP_VISIBLE_DEVICES']
else:
# Make the client_id-th GPU visible to the client.
os.environ['CUDA_VISIBLE_DEVICES'] = f'{client_id}'
os.environ['HIP_VISIBLE_DEVICES'] = f'{client_id}'
# Make the client_id-th (4x) TPU cores visible to the client.
os.environ['CLOUD_TPU_TASK_ID'] = f'{client_id}'
if 'tpu' in DTENSOR_TEST_UTIL_BACKEND.value:
del ports # Unused due to lack of implementation.
# We need to find out if there is a way to slice a CloudTPU host to
# multiple workers.
raise NotImplementedError(
'OSS multi-client tests of TPU is not supported.'
)
def get_mp_context():
return multiprocessing.get_context('forkserver')
def handle_test_main(main, *args, **kwargs):
main(*args, **kwargs)
# LINT.ThenChange(test_backend_util.py)
| DTensorTestBackendConfigurator |
python | joke2k__faker | faker/providers/ssn/pl_PL/__init__.py | {
"start": 692,
"end": 1956
} | class ____(SsnProvider):
def ssn(self) -> str:
"""
Returns 11 character Polish national identity code (Public Electronic Census System,
Polish: Powszechny Elektroniczny System Ewidencji Ludności - PESEL).
It has the form YYMMDDZZZXQ, where YYMMDD is the date of birth (with century
encoded in month field), ZZZ is the personal identification number, X denotes sex
(even for females, odd for males) and Q is a parity number.
https://en.wikipedia.org/wiki/National_identification_number#Poland
"""
birth_date = self.generator.date_time()
pesel_digits = [
*divmod(birth_date.year % 100, 10),
*divmod(calculate_month(birth_date), 10),
*divmod(birth_date.day, 10),
]
for _ in range(4):
pesel_digits.append(self.random_digit())
pesel_digits.append(checksum(pesel_digits))
return "".join(str(digit) for digit in pesel_digits)
vat_id_formats = ("PL##########",)
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Polish VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
| Provider |
python | kamyu104__LeetCode-Solutions | Python/balanced-binary-tree.py | {
"start": 182,
"end": 705
} | class ____(object):
# @param root, a tree node
# @return a boolean
def isBalanced(self, root):
def getHeight(root):
if root is None:
return 0
left_height, right_height = \
getHeight(root.left), getHeight(root.right)
if left_height < 0 or right_height < 0 or \
abs(left_height - right_height) > 1:
return -1
return max(left_height, right_height) + 1
return (getHeight(root) >= 0)
| Solution |
python | spyder-ide__spyder | spyder/plugins/console/plugin.py | {
"start": 839,
"end": 8215
} | class ____(SpyderDockablePlugin):
"""
Console widget
"""
NAME = 'internal_console'
WIDGET_CLASS = ConsoleWidget
OPTIONAL = [Plugins.MainMenu, Plugins.Editor]
CONF_SECTION = NAME
CONF_FILE = False
TABIFY = [Plugins.IPythonConsole, Plugins.History]
CAN_BE_DISABLED = False
RAISE_AND_FOCUS = True
# --- Signals
# ------------------------------------------------------------------------
sig_focus_changed = Signal() # TODO: I think this is not being used now?
sig_edit_goto_requested = Signal(str, int, str)
"""
This signal will request to open a file in a given row and column
using a code editor.
Parameters
----------
path: str
Path to file.
row: int
Cursor starting row position.
word: str
Word to select on given row.
"""
sig_refreshed = Signal()
"""This signal is emitted when the interpreter buffer is flushed."""
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Example `{'name': str, 'ignore_unknown': bool}`.
"""
# --- SpyderDockablePlugin API
# ------------------------------------------------------------------------
@staticmethod
def get_name():
return _('Internal console')
@classmethod
def get_icon(cls):
return QIcon()
@staticmethod
def get_description():
return _('An internal Python console running Spyder itself.')
def on_initialize(self):
widget = self.get_widget()
# Signals
widget.sig_edit_goto_requested.connect(self.sig_edit_goto_requested)
widget.sig_focus_changed.connect(self.sig_focus_changed)
widget.sig_refreshed.connect(self.sig_refreshed)
widget.sig_help_requested.connect(self.sig_help_requested)
# Crash handling
previous_crash = self.get_conf(
'previous_crash',
default='',
section='main',
)
if previous_crash:
error_data = dict(
text=previous_crash,
is_traceback=True,
title="Segmentation fault crash",
label=_("<h3>Spyder crashed during last session</h3>"),
steps=_("Please provide any additional information you "
"might have about the crash."),
)
widget.handle_exception(error_data)
@on_plugin_available(plugin=Plugins.MainMenu)
def on_main_menu_available(self):
widget = self.get_widget()
mainmenu = self.get_plugin(Plugins.MainMenu)
# Actions
mainmenu.add_item_to_application_menu(
widget.quit_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Restart)
@on_plugin_teardown(plugin=Plugins.MainMenu)
def on_main_menu_teardown(self):
mainmenu = self.get_plugin(Plugins.MainMenu)
mainmenu.remove_item_from_application_menu(
ConsoleWidgetActions.Quit,
menu_id=ApplicationMenus.File)
@on_plugin_available(plugin=Plugins.Editor)
def on_editor_available(self):
editor = self.get_plugin(Plugins.Editor)
self.sig_edit_goto_requested.connect(editor.load)
@on_plugin_teardown(plugin=Plugins.Editor)
def on_editor_teardown(self):
editor = self.get_plugin(Plugins.Editor)
self.sig_edit_goto_requested.disconnect(editor.load)
def update_font(self):
font = self.get_font(SpyderFontType.Monospace)
self.get_widget().set_font(font)
def on_close(self, cancelable=False):
self.get_widget().dialog_manager.close_all()
return True
def on_mainwindow_visible(self):
self.set_exit_function(self.main.closing)
# Hide this plugin when not in development so that people don't
# use it instead of the IPython console
if DEV is None:
self.toggle_view_action.setChecked(False)
self.dockwidget.hide()
# --- API
# ------------------------------------------------------------------------
@Slot()
def report_issue(self):
"""Report an issue with the SpyderErrorDialog."""
self.get_widget().report_issue()
@property
def error_dialog(self):
"""
Error dialog attribute accesor.
"""
return self.get_widget().error_dlg
def close_error_dialog(self):
"""
Close the error dialog if visible.
"""
self.get_widget().close_error_dlg()
def exit_interpreter(self):
"""
Exit the internal console interpreter.
This is equivalent to requesting the main application to quit.
"""
self.get_widget().exit_interpreter()
def execute_lines(self, lines):
"""
Execute the given `lines` of code in the internal console.
"""
self.get_widget().execute_lines(lines)
def get_sys_path(self):
"""
Return the system path of the internal console.
"""
return self.get_widget().get_sys_path()
@Slot(dict)
def handle_exception(self, error_data, sender=None):
"""
Handle any exception that occurs during Spyder usage.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if sender is None:
sender = self.sender()
self.get_widget().handle_exception(
error_data,
sender=sender
)
def quit(self):
"""
Send the quit request to the main application.
"""
self.sig_quit_requested.emit()
def restore_stds(self):
"""
Restore stdout and stderr when using open file dialogs.
"""
self.get_widget().restore_stds()
def redirect_stds(self):
"""
Redirect stdout and stderr when using open file dialogs.
"""
self.get_widget().redirect_stds()
def set_exit_function(self, func):
"""
Set the callback function to execute when the `exit_interpreter` is
called.
"""
self.get_widget().set_exit_function(func)
def start_interpreter(self, namespace):
"""
Start the internal console interpreter.
Stdin and stdout are now redirected through the internal console.
"""
widget = self.get_widget()
widget.start_interpreter(namespace)
def set_namespace_item(self, name, value):
"""
Add an object to the namespace dictionary of the internal console.
"""
self.get_widget().set_namespace_item(name, value)
| Console |
python | aio-libs__aiohttp | aiohttp/web_response.py | {
"start": 1251,
"end": 1772
} | class ____(enum.Enum):
# The content codings that we have support for.
#
# Additional registered codings are listed at:
# https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
deflate = "deflate"
gzip = "gzip"
identity = "identity"
CONTENT_CODINGS = {coding.value: coding for coding in ContentCoding}
############################################################
# HTTP Response classes
############################################################
| ContentCoding |
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 4244,
"end": 25186
} | class ____:
"""
An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing `draw_path` alone would give a
highly capable backend):
* `draw_path`
* `draw_image`
* `draw_gouraud_triangles`
The following methods *should* be implemented in the backend for
optimization reasons:
* `draw_text`
* `draw_markers`
* `draw_path_collection`
* `draw_quad_mesh`
"""
def __init__(self):
super().__init__()
self._texmanager = None
self._text2path = text.TextToPath()
self._raster_depth = 0
self._rasterizing = False
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s* and *gid* (if set) as id.
Only used by the SVG renderer.
"""
def close_group(self, s):
"""
Close a grouping element with label *s*.
Only used by the SVG renderer.
"""
def draw_path(self, gc, path, transform, rgbFace=None):
"""Draw a `~.path.Path` instance using the given affine transform."""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draw a marker at each of *path*'s vertices (excluding control points).
The base (fallback) implementation makes multiple calls to `draw_path`.
Backends may want to override this method in order to draw the marker
only once and reuse it multiple times.
Parameters
----------
gc : `.GraphicsContextBase`
The graphics context.
marker_path : `~matplotlib.path.Path`
The path for the marker.
marker_trans : `~matplotlib.transforms.Transform`
An affine transform applied to the marker.
path : `~matplotlib.path.Path`
The locations to draw the markers.
trans : `~matplotlib.transforms.Transform`
An affine transform applied to the path.
rgbFace : :mpltype:`color`, optional
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offset_trans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position, *, hatchcolors=None):
"""
Draw a collection of *paths*.
Each path is first transformed by the corresponding entry
in *all_transforms* (a list of (3, 3) matrices) and then by
*master_transform*. They are then translated by the corresponding
entry in *offsets*, which has been first transformed by *offset_trans*.
*facecolors*, *edgecolors*, *linewidths*, *linestyles*, *antialiased*
and *hatchcolors* are lists that set the corresponding properties.
.. versionadded:: 3.11
Allow *hatchcolors* to be specified.
*offset_position* is unused now, but the argument is kept for
backwards compatibility.
The base (fallback) implementation makes multiple calls to `draw_path`.
Backends may want to override this in order to render each set of
path data only once, and then reference that path multiple times with
the different offsets, colors, styles etc. The generator methods
`!_iter_collection_raw_paths` and `!_iter_collection` are provided to
help with (and standardize) the implementation across backends. It
is highly recommended to use those generators, so that changes to the
behavior of `draw_path_collection` can be made globally.
"""
path_ids = self._iter_collection_raw_paths(master_transform,
paths, all_transforms)
if hatchcolors is None:
hatchcolors = []
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, list(path_ids), offsets, offset_trans,
facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position, hatchcolors=hatchcolors):
path, transform = path_id
# Only apply another translation if we have an offset, else we
# reuse the initial transform.
if xo != 0 or yo != 0:
# The transformation can be used by multiple paths. Since
# translate is a inplace operation, we need to copy the
# transformation by .frozen() before applying the translation.
transform = transform.frozen()
transform.translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
Draw a quadmesh.
The base (fallback) implementation converts the quadmesh to paths and
then calls `draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh._convert_mesh_to_paths(coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], float)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draw a series of Gouraud triangles.
Parameters
----------
gc : `.GraphicsContextBase`
The graphics context.
triangles_array : (N, 3, 2) array-like
Array of *N* (x, y) points for the triangles.
colors_array : (N, 3, 4) array-like
Array of *N* RGBA colors for each point of the triangles.
transform : `~matplotlib.transforms.Transform`
An affine transform to apply to the points.
"""
raise NotImplementedError
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
Helper method (along with `_iter_collection`) to implement
`draw_path_collection` in a memory-efficient manner.
This method yields all of the base path/transform combinations, given a
master transform, a list of paths and list of transforms.
The arguments should be exactly what is passed in to
`draw_path_collection`.
The backend should take each yielded path and transform and create an
object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in range(N):
path = paths[i % Npaths]
if Ntransforms:
transform = Affine2D(all_transforms[i % Ntransforms])
yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
`_iter_collection_raw_paths` would be used when calling
`_iter_collection`. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or len(facecolors) == len(edgecolors) == 0:
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
def _iter_collection(self, gc, path_ids, offsets, offset_trans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position, *, hatchcolors):
"""
Helper method (along with `_iter_collection_raw_paths`) to implement
`draw_path_collection` in a memory-efficient manner.
This method yields all of the path, offset and graphics context
combinations to draw the path collection. The caller should already
have looped over the results of `_iter_collection_raw_paths` to draw
this collection.
The arguments should be the same as that passed into
`draw_path_collection`, with the exception of *path_ids*, which is a
list of arbitrary objects that the backend will use to reference one of
the paths created in the `_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nhatchcolors = len(hatchcolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0 and Nhatchcolors == 0) or Npaths == 0:
return
gc0 = self.new_gc()
gc0.copy_properties(gc)
def cycle_or_default(seq, default=None):
# Cycle over *seq* if it is not empty; else always yield *default*.
return (itertools.cycle(seq) if len(seq)
else itertools.repeat(default))
pathids = cycle_or_default(path_ids)
toffsets = cycle_or_default(offset_trans.transform(offsets), (0, 0))
fcs = cycle_or_default(facecolors)
ecs = cycle_or_default(edgecolors)
hcs = cycle_or_default(hatchcolors)
lws = cycle_or_default(linewidths)
lss = cycle_or_default(linestyles)
aas = cycle_or_default(antialiaseds)
urls = cycle_or_default(urls)
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
for pathid, (xo, yo), fc, ec, hc, lw, ls, aa, url in itertools.islice(
zip(pathids, toffsets, fcs, ecs, hcs, lws, lss, aas, urls), N):
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(lw)
if Nlinestyles:
gc0.set_dashes(*ls)
if len(ec) == 4 and ec[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(ec)
if Nhatchcolors:
gc0.set_hatch_color(hc)
if fc is not None and len(fc) == 4 and fc[3] == 0:
fc = None
gc0.set_antialiased(aa)
if Nurls:
gc0.set_url(url)
yield xo, yo, pathid, gc0, fc
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to `draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im, transform=None):
"""
Draw an RGBA image.
Parameters
----------
gc : `.GraphicsContextBase`
A graphics context with clipping information.
x : float
The distance in physical units (i.e., dots or pixels) from the left
hand side of the canvas.
y : float
The distance in physical units (i.e., dots or pixels) from the
bottom side of the canvas.
im : (N, M, 4) array of `numpy.uint8`
An array of RGBA pixels.
transform : `~matplotlib.transforms.Affine2DBase`
If and only if the concrete backend is written such that
`~.RendererBase.option_scale_image` returns ``True``, an affine
transformation (i.e., an `.Affine2DBase`) *may* be passed to
`~.RendererBase.draw_image`. The translation vector of the
transformation is given in physical units (i.e., dots or pixels).
Note that the transformation does not override *x* and *y*,
and has to be applied *before* translatingthe result by
*x* and *y* (this can be accomplished by adding *x*
and *y* to the translation vector defined by *transform*).
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
Return whether image composition by Matplotlib should be skipped.
Raster backends should usually return False (letting the C-level
rasterizer take care of image composition); vector backends should
usually return ``not rcParams["image.composite_image"]``.
"""
return False
def option_scale_image(self):
"""
Return whether arbitrary affine transformations in
`~.RendererBase.draw_image` are supported (True for most vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):
"""
Draw a TeX instance.
Parameters
----------
gc : `.GraphicsContextBase`
The graphics context.
x : float
The x location of the text in display coords.
y : float
The y location of the text baseline in display coords.
s : str
The TeX text string.
prop : `~matplotlib.font_manager.FontProperties`
The font properties.
angle : float
The rotation angle in degrees anti-clockwise.
mtext : `~matplotlib.text.Text`
The original text object to be rendered.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw a text instance.
Parameters
----------
gc : `.GraphicsContextBase`
The graphics context.
x : float
The x location of the text in display coords.
y : float
The y location of the text baseline in display coords.
s : str
The text string.
prop : `~matplotlib.font_manager.FontProperties`
The font properties.
angle : float
The rotation angle in degrees anti-clockwise.
ismath : bool or "TeX"
If True, use mathtext parser.
mtext : `~matplotlib.text.Text`
The original text object to be rendered.
Notes
-----
**Notes for backend implementers:**
`.RendererBase.draw_text` also supports passing "TeX" to the *ismath*
parameter to use TeX rendering, but this is not required for actual
rendering backends, and indeed many builtin backends do not support
this. Rather, TeX rendering is provided by `~.RendererBase.draw_tex`.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
Draw the text by converting them to paths using `.TextToPath`.
This private helper supports the same parameters as
`~.RendererBase.draw_text`; setting *ismath* to "TeX" triggers TeX
rendering.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
verts, codes = text2path.get_text_path(prop, s, ismath=ismath)
path = Path(verts, codes)
if self.flipy():
width, height = self.get_canvas_width_height()
transform = (Affine2D()
.scale(fontsize / text2path.FONT_SCALE)
.rotate_deg(angle)
.translate(x, height - y))
else:
transform = (Affine2D()
.scale(fontsize / text2path.FONT_SCALE)
.rotate_deg(angle)
.translate(x, y))
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
Get the width, height, and descent (offset from the bottom to the baseline), in
display coords, of the string *s* with `.FontProperties` *prop*.
Whitespace at the start and the end of *s* is included in the reported width.
"""
fontsize = prop.get_size_in_points()
if ismath == 'TeX':
# todo: handle properties
return self.get_texmanager().get_text_width_height_descent(
s, fontsize, renderer=self)
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
font.set_size(fontsize, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
Return whether y values increase from top to bottom.
Note that this only affects drawing of texts.
"""
return True
def get_canvas_width_height(self):
"""Return the canvas width and height in display coords."""
return 1, 1
def get_texmanager(self):
"""Return the `.TexManager` instance."""
if self._texmanager is None:
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""Return an instance of a `.GraphicsContextBase`."""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units.
You need to override this function (unless your backend
doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72 * dpi/72
Parameters
----------
points : float or array-like
Returns
-------
Points converted to pixels
"""
return points
def start_rasterizing(self):
"""
Switch to the raster renderer.
Used by `.MixedModeRenderer`.
"""
def stop_rasterizing(self):
"""
Switch back to the vector renderer and draw the contents of the raster
renderer as an image on the vector renderer.
Used by `.MixedModeRenderer`.
"""
def start_filter(self):
"""
Switch to a temporary renderer for image filtering effects.
Currently only supported by the agg renderer.
"""
def stop_filter(self, filter_func):
"""
Switch back to the original renderer. The contents of the temporary
renderer is processed with the *filter_func* and is drawn on the
original renderer as an image.
Currently only supported by the agg renderer.
"""
def _draw_disabled(self):
"""
Context manager to temporary disable drawing.
This is used for getting the drawn size of Artists. This lets us
run the draw process to update any Python state but does not pay the
cost of the draw_XYZ calls on the canvas.
"""
no_ops = {
meth_name: functools.update_wrapper(lambda *args, **kwargs: None,
getattr(RendererBase, meth_name))
for meth_name in dir(RendererBase)
if (meth_name.startswith("draw_")
or meth_name in ["open_group", "close_group"])
}
return _setattr_cm(self, **no_ops)
| RendererBase |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | {
"start": 22903,
"end": 29669
} | class ____(Ernie4_5_MoePreTrainedModel):
def __init__(self, config: Ernie4_5_MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Ernie4_5_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Ernie4_5_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Ernie4_5_MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
@auto_docstring
| Ernie4_5_MoeModel |
python | ansible__ansible | lib/ansible/cli/inventory.py | {
"start": 993,
"end": 15983
} | class ____(CLI):
""" used to display or dump the configured inventory as Ansible sees it """
name = 'ansible-inventory'
ARGUMENTS = {'group': 'The name of a group in the inventory, relevant when using --graph', }
def __init__(self, args):
super(InventoryCLI, self).__init__(args)
self.vm = None
self.loader = None
self.inventory = None
def init_parser(self):
super(InventoryCLI, self).init_parser(
desc='Show Ansible inventory information, by default it uses the inventory script JSON format')
opt_help.add_inventory_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_basedir_options(self.parser)
opt_help.add_runtask_options(self.parser)
# remove unused default options
self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
self.parser.add_argument('args', metavar='group', nargs='?', help='The name of a group in the inventory, relevant when using --graph')
# Actions
action_group = self.parser.add_argument_group("Actions", "One of following must be used on invocation, ONLY ONE!")
action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
action_group.add_argument("--host", action="store", default=None, dest='host',
help='Output specific host info, works as inventory script. It will ignore limit')
action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
help='create inventory graph, if supplying pattern it must be a valid group name. It will ignore limit')
self.parser.add_argument_group(action_group)
# graph
self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
help='Use YAML format instead of default JSON, ignored for --graph')
self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
help='Use TOML format instead of default JSON, ignored for --graph')
self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
help='Add vars to graph display, ignored unless used with --graph')
# list
self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
help="When doing --list, represent in a way that is optimized for export,"
"not as an accurate representation of how Ansible has processed it")
self.parser.add_argument('--output', default=None, dest='output_file',
help="When doing --list, send the inventory to a file instead of to the screen")
# self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
# help="When doing --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
def post_process_args(self, options):
options = super(InventoryCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options)
# there can be only one! and, at least, one!
used = 0
for opt in (options.list, options.host, options.graph):
if opt:
used += 1
if used == 0:
raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
elif used > 1:
raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
# set host pattern to default if not supplied
if options.args:
options.pattern = options.args
else:
options.pattern = 'all'
return options
def run(self):
super(InventoryCLI, self).run()
# Initialize needed objects
self.loader, self.inventory, self.vm = self._play_prereqs()
results = None
if context.CLIARGS['host']:
hosts = self.inventory.get_hosts(context.CLIARGS['host'])
if len(hosts) != 1:
raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
myvars = self._get_host_variables(host=hosts[0])
# FIXME: should we template first?
results = self.dump(myvars)
else:
if context.CLIARGS['subset']:
# not doing single host, set limit in general if given
self.inventory.subset(context.CLIARGS['subset'])
if context.CLIARGS['graph']:
results = self.inventory_graph()
elif context.CLIARGS['list']:
top = self._get_group('all')
if context.CLIARGS['yaml']:
results = self.yaml_inventory(top)
elif context.CLIARGS['toml']:
results = self.toml_inventory(top)
else:
results = self.json_inventory(top)
results = self.dump(results)
if results:
outfile = context.CLIARGS['output_file']
if outfile is None:
# FIXME: pager?
display.display(results)
else:
try:
with open(to_bytes(outfile), 'wb') as f:
f.write(to_bytes(results))
except OSError as ex:
raise AnsibleError(f'Unable to write to destination file {outfile!r}.') from ex
sys.exit(0)
sys.exit(1)
@staticmethod
def dump(stuff):
if context.CLIARGS['yaml']:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
results = to_text(yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False, allow_unicode=True))
elif context.CLIARGS['toml']:
results = toml_dumps(stuff)
else:
results = json.dumps(stuff, cls=_inventory_legacy.Encoder, sort_keys=True, indent=4)
return results
def _get_group_variables(self, group):
# get info from inventory source
res = group.get_vars()
# Always load vars plugins
res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
if context.CLIARGS['basedir']:
res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
if group.priority != 1:
res['ansible_group_priority'] = group.priority
return self._remove_internal(res)
def _get_host_variables(self, host):
if context.CLIARGS['export']:
# only get vars defined directly host
hostvars = host.get_vars()
# Always load vars plugins
hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
if context.CLIARGS['basedir']:
hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
else:
# get all vars flattened by host, but skip magic hostvars
hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
return self._remove_internal(hostvars)
def _get_group(self, gname):
group = self.inventory.groups.get(gname)
return group
@staticmethod
def _remove_internal(dump):
for internal in C.INTERNAL_STATIC_VARS:
if internal in dump:
del dump[internal]
return dump
@staticmethod
def _remove_empty_keys(dump):
# remove empty keys
for x in ('hosts', 'vars', 'children'):
if x in dump and not dump[x]:
del dump[x]
@staticmethod
def _show_vars(dump, depth):
result = []
for (name, val) in sorted(dump.items()):
result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
return result
@staticmethod
def _graph_name(name, depth=0):
if depth:
name = " |" * (depth) + "--%s" % name
return name
def _graph_group(self, group, depth=0):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
for kid in group.child_groups:
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
for host in group.hosts:
result.append(self._graph_name(host.name, depth))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_group_variables(group), depth))
return result
def inventory_graph(self):
start_at = self._get_group(context.CLIARGS['pattern'])
if start_at:
return '\n'.join(self._graph_group(start_at))
else:
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
def json_inventory(self, top):
seen_groups = set()
def format_group(group, available_hosts):
results = {}
results[group.name] = {}
if group.name != 'all':
results[group.name]['hosts'] = [h.name for h in group.hosts if h.name in available_hosts]
results[group.name]['children'] = []
for subgroup in group.child_groups:
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen_groups:
results.update(format_group(subgroup, available_hosts))
seen_groups.add(subgroup.name)
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty_keys(results[group.name])
# remove empty groups
if not results[group.name]:
del results[group.name]
return results
hosts = self.inventory.get_hosts(top.name)
results = format_group(top, frozenset(h.name for h in hosts))
# populate meta
results['_meta'] = {
'hostvars': {},
'profile': _inventory_legacy.Encoder.profile_name,
}
for host in hosts:
hvars = self._get_host_variables(host)
if hvars:
results['_meta']['hostvars'][host.name] = hvars
return results
def yaml_inventory(self, top):
seen_hosts = set()
seen_groups = set()
def format_group(group, available_hosts):
results = {}
# initialize group + vars
results[group.name] = {}
# subgroups
results[group.name]['children'] = {}
for subgroup in group.child_groups:
if subgroup.name != 'all':
if subgroup.name in seen_groups:
results[group.name]['children'].update({subgroup.name: {}})
else:
results[group.name]['children'].update(format_group(subgroup, available_hosts))
seen_groups.add(subgroup.name)
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
for h in group.hosts:
if h.name not in available_hosts:
continue # observe limit
myvars = {}
if h.name not in seen_hosts: # avoid defining host vars more than once
seen_hosts.add(h.name)
myvars = self._get_host_variables(host=h)
results[group.name]['hosts'][h.name] = myvars
if context.CLIARGS['export']:
gvars = self._get_group_variables(group)
if gvars:
results[group.name]['vars'] = gvars
self._remove_empty_keys(results[group.name])
# remove empty groups
if not results[group.name]:
del results[group.name]
return results
available_hosts = frozenset(h.name for h in self.inventory.get_hosts(top.name))
return format_group(top, available_hosts)
def toml_inventory(self, top):
seen_hosts = set()
seen_hosts = set()
has_ungrouped = bool(next(g.hosts for g in top.child_groups if g.name == 'ungrouped'))
def format_group(group, available_hosts):
results = {}
results[group.name] = {}
results[group.name]['children'] = []
for subgroup in group.child_groups:
if subgroup.name == 'ungrouped' and not has_ungrouped:
continue
if group.name != 'all':
results[group.name]['children'].append(subgroup.name)
results.update(format_group(subgroup, available_hosts))
if group.name != 'all':
for host in group.hosts:
if host.name not in available_hosts:
continue
if host.name not in seen_hosts:
seen_hosts.add(host.name)
host_vars = self._get_host_variables(host=host)
else:
host_vars = {}
try:
results[group.name]['hosts'][host.name] = host_vars
except KeyError:
results[group.name]['hosts'] = {host.name: host_vars}
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty_keys(results[group.name])
# remove empty groups
if not results[group.name]:
del results[group.name]
return results
available_hosts = frozenset(h.name for h in self.inventory.get_hosts(top.name))
results = format_group(top, available_hosts)
return results
def toml_dumps(data: t.Any) -> str:
try:
from tomli_w import dumps as _tomli_w_dumps
except ImportError:
pass
else:
return _tomli_w_dumps(data)
raise AnsibleRuntimeError('The Python library "tomli-w" is required when using the TOML output format.')
def main(args=None):
InventoryCLI.cli_executor(args)
if __name__ == '__main__':
main()
| InventoryCLI |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 24084,
"end": 24557
} | class ____(models.Model):
name = models.CharField(max_length=15, db_index=True)
name_keeps_index = models.CharField(max_length=15, db_index=True)
fk = models.ForeignKey(
"Library", on_delete=models.CASCADE, null=True, related_name="+"
)
fk_keeps_index = models.ForeignKey(
"Library", on_delete=models.CASCADE, null=True, related_name="+"
)
history = HistoricalRecords(no_db_index=["name", "fk", "other"])
| ModelWithMultipleNoDBIndex |
python | getsentry__sentry | src/sentry/incidents/action_handlers.py | {
"start": 11461,
"end": 13255
} | class ____(DefaultActionHandler):
@property
def provider(self) -> str:
return IntegrationProviderSlug.PAGERDUTY.value
def send_alert(
self,
action: AlertRuleTriggerAction,
incident: Incident,
project: Project,
metric_value: int | float | None,
new_status: IncidentStatus,
notification_uuid: str | None = None,
):
from sentry.integrations.pagerduty.utils import send_incident_alert_notification
if metric_value is None:
metric_value = get_metric_count_from_incident(incident)
notification_context = NotificationContext.from_alert_rule_trigger_action(action)
alert_context = AlertContext.from_alert_rule_incident(incident.alert_rule)
metric_issue_context = MetricIssueContext.from_legacy_models(
incident=incident,
new_status=new_status,
metric_value=metric_value,
)
success = send_incident_alert_notification(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
organization=incident.organization,
notification_uuid=notification_uuid,
)
if success:
self.record_alert_sent_analytics(
organization_id=incident.organization.id,
project_id=project.id,
alert_id=incident.alert_rule.id,
external_id=action.target_identifier,
notification_uuid=notification_uuid,
)
@AlertRuleTriggerAction.register_type(
"opsgenie",
AlertRuleTriggerAction.Type.OPSGENIE,
[AlertRuleTriggerAction.TargetType.SPECIFIC],
integration_provider="opsgenie",
)
| PagerDutyActionHandler |
python | walkccc__LeetCode | solutions/653. Two Sum IV - Input is a BST/653.py | {
"start": 511,
"end": 915
} | class ____:
def findTarget(self, root: TreeNode | None, k: int) -> bool:
if not root:
return False
left = BSTIterator(root, True)
right = BSTIterator(root, False)
l = left.next()
r = right.next()
while l < r:
summ = l + r
if summ == k:
return True
if summ < k:
l = left.next()
else:
r = right.next()
return False
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/dagster_run.py | {
"start": 24545,
"end": 25926
} | class ____(
IHaveNew,
LegacyNamedTupleMixin,
LoadableBy[str],
):
"""Internal representation of a run record, as stored in a
:py:class:`~dagster._core.storage.runs.RunStorage`.
Users should not invoke this class directly.
"""
storage_id: int
dagster_run: DagsterRun
create_timestamp: datetime
update_timestamp: datetime
# start_time and end_time fields will be populated once the run has started and ended, respectively, but will be None beforehand.
start_time: Optional[float] = None
end_time: Optional[float] = None
@classmethod
def _blocking_batch_load(
cls, keys: Iterable[str], context: LoadingContext
) -> Iterable[Optional["RunRecord"]]:
result_map: dict[str, Optional[RunRecord]] = {run_id: None for run_id in keys}
run_ids = list(result_map.keys())
records = []
batch_size = int(os.getenv("DAGSTER_RUN_RECORD_LOADER_BATCH_SIZE", "100"))
for i in range(0, len(run_ids), batch_size):
chunk = run_ids[i : i + batch_size]
chunk_records = context.instance.get_run_records(RunsFilter(run_ids=chunk))
records.extend([record for record in chunk_records if record])
for r in records:
result_map[r.dagster_run.run_id] = r
return [result_map[k] for k in keys]
@whitelist_for_serdes
@record
| RunRecord |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/supervisor.py | {
"start": 60450,
"end": 60661
} | class ____:
"""Result of running a task via ``InProcessTestSupervisor``."""
ti: RuntimeTI
state: str
msg: BaseModel | None
error: BaseException | None
@attrs.define(kw_only=True)
| TaskRunResult |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol12.py | {
"start": 185,
"end": 237
} | class ____(BaseClass, Protocol):
x: int
| DerivedClass |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/two_step_game.py | {
"start": 3726,
"end": 4558
} | class ____(MultiAgentEnv):
def __init__(self, env_config):
self._agent_ids = {"agents"}
super().__init__()
env = TwoStepGame(env_config)
tuple_obs_space = Tuple([env.observation_space, env.observation_space])
tuple_act_space = Tuple([env.action_space, env.action_space])
self.env = env.with_agent_groups(
groups={"agents": [0, 1]},
obs_space=tuple_obs_space,
act_space=tuple_act_space,
)
self.observation_space = Dict({"agents": self.env.observation_space})
self.action_space = Dict({"agents": self.env.action_space})
def reset(self, *, seed=None, options=None):
return self.env.reset(seed=seed, options=options)
def step(self, actions):
return self.env.step(actions)
| TwoStepGameWithGroupedAgents |
python | pytorch__pytorch | benchmarks/transformer/sdp.py | {
"start": 2281,
"end": 2467
} | class ____:
config: ExperimentConfig
results: ExperimentResults
def get_entries(self) -> list:
return self.config.get_entries() + self.results.get_entries()
| Experiment |
python | python-markdown__markdown | markdown/extensions/attr_list.py | {
"start": 7508,
"end": 7838
} | class ____(Extension):
""" Attribute List extension for Python-Markdown """
def extendMarkdown(self, md):
md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return AttrListExtension(**kwargs)
| AttrListExtension |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_typing.py | {
"start": 3309,
"end": 3439
} | class ____(TypedDict):
float32: DType
float64: DType
# `__array_namespace_info__.dtypes(kind="complex floating")`
| DTypesReal |
python | astropy__astropy | astropy/coordinates/builtin_frames/lsr.py | {
"start": 7506,
"end": 9360
} | class ____(BaseRADecFrame):
"""A frame in the Kinematic Local Standard of Rest (LSR).
Conceptually the kinematic LSR is a frame where the average motion
of the stars in the solar neighborhood is zero. In practice, the
observed average motion is different for different spectral types,
which has historically justified using convenient rounded values for
the solar motion relative to the LSR. This LSRK frame uses the
definition from
Gordon 1975, Methods of Experimental Physics: Volume 12:
Astrophysics, Part C: Radio Observations - Section 6.1.5.
meaning the solar motion is 20 km/s towards RA=270 Dec=30 (B1900).
The frame is axis-aligned and co-spatial with `~astropy.coordinates.ICRS`.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# GORDON1975_V_BARY = 20*u.km/u.s
# GORDON1975_DIRECTION = FK4(ra=270*u.deg, dec=30*u.deg, equinox='B1900')
# V_OFFSET_LSRK = ((GORDON1975_V_BARY * GORDON1975_DIRECTION.transform_to(ICRS()).data)
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRK = r.CartesianDifferential(
[0.28999706839034606, -17.317264789717928, 10.00141199546947] * u.km / u.s
)
ICRS_LSRK_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRK
)
LSRK_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRK
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRK)
def icrs_to_lsrk(icrs_coord, lsr_frame):
return None, ICRS_LSRK_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRK, ICRS)
def lsrk_to_icrs(lsr_coord, icrs_frame):
return None, LSRK_ICRS_OFFSET
# ------------------------------------------------------------------------------
| LSRK |
python | pypa__pipenv | pipenv/patched/pip/_internal/cli/req_command.py | {
"start": 3031,
"end": 13264
} | class ____(IndexGroupCommand):
def __init__(self, *args: Any, **kw: Any) -> None:
super().__init__(*args, **kw)
self.cmd_opts.add_option(cmdoptions.dependency_groups())
self.cmd_opts.add_option(cmdoptions.no_clean())
@staticmethod
def determine_resolver_variant(options: Values) -> str:
"""Determines which resolver should be used, based on the given options."""
if "legacy-resolver" in options.deprecated_features_enabled:
return "legacy"
return "resolvelib"
@classmethod
def make_requirement_preparer(
cls,
temp_build_dir: TempDirectory,
options: Values,
build_tracker: BuildTracker,
session: PipSession,
finder: PackageFinder,
use_user_site: bool,
download_dir: Optional[str] = None,
verbosity: int = 0,
) -> RequirementPreparer:
"""
Create a RequirementPreparer instance for the given parameters.
"""
temp_build_dir_path = temp_build_dir.path
assert temp_build_dir_path is not None
legacy_resolver = False
resolver_variant = cls.determine_resolver_variant(options)
if resolver_variant == "resolvelib":
lazy_wheel = "fast-deps" in options.features_enabled
if lazy_wheel:
logger.warning(
"pip is using lazily downloaded wheels using HTTP "
"range requests to obtain dependency information. "
"This experimental feature is enabled through "
"--use-feature=fast-deps and it is not ready for "
"production."
)
else:
legacy_resolver = True
lazy_wheel = False
if "fast-deps" in options.features_enabled:
logger.warning(
"fast-deps has no effect when used with the legacy resolver."
)
return RequirementPreparer(
build_dir=temp_build_dir_path,
src_dir=options.src_dir,
download_dir=download_dir,
build_isolation=options.build_isolation,
check_build_deps=options.check_build_deps,
build_tracker=build_tracker,
session=session,
progress_bar=options.progress_bar,
finder=finder,
require_hashes=options.require_hashes,
use_user_site=use_user_site,
lazy_wheel=lazy_wheel,
verbosity=verbosity,
legacy_resolver=legacy_resolver,
resume_retries=options.resume_retries,
)
@classmethod
def make_resolver(
cls,
preparer: RequirementPreparer,
finder: PackageFinder,
options: Values,
wheel_cache: Optional[WheelCache] = None,
use_user_site: bool = False,
ignore_installed: bool = True,
ignore_requires_python: bool = False,
force_reinstall: bool = False,
upgrade_strategy: str = "to-satisfy-only",
use_pep517: Optional[bool] = None,
py_version_info: Optional[Tuple[int, ...]] = None,
) -> BaseResolver:
"""
Create a Resolver instance for the given parameters.
"""
make_install_req = partial(
install_req_from_req_string,
isolated=options.isolated_mode,
use_pep517=use_pep517,
)
resolver_variant = cls.determine_resolver_variant(options)
# The long import name and duplicated invocation is needed to convince
# Mypy into correctly typechecking. Otherwise it would complain the
# "Resolver" class being redefined.
if resolver_variant == "resolvelib":
import pipenv.patched.pip._internal.resolution.resolvelib.resolver
return pipenv.patched.pip._internal.resolution.resolvelib.resolver.Resolver(
preparer=preparer,
finder=finder,
wheel_cache=wheel_cache,
make_install_req=make_install_req,
use_user_site=use_user_site,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
force_reinstall=force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=py_version_info,
)
import pipenv.patched.pip._internal.resolution.legacy.resolver
return pipenv.patched.pip._internal.resolution.legacy.resolver.Resolver(
preparer=preparer,
finder=finder,
wheel_cache=wheel_cache,
make_install_req=make_install_req,
use_user_site=use_user_site,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
force_reinstall=force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=py_version_info,
)
def get_requirements(
self,
args: List[str],
options: Values,
finder: PackageFinder,
session: PipSession,
) -> List[InstallRequirement]:
"""
Parse command-line arguments into the corresponding requirements.
"""
requirements: List[InstallRequirement] = []
for filename in options.constraints:
for parsed_req in parse_requirements(
filename,
constraint=True,
finder=finder,
options=options,
session=session,
):
req_to_add = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode,
user_supplied=False,
)
requirements.append(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req,
comes_from=None,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
config_settings=getattr(options, "config_settings", None),
)
requirements.append(req_to_add)
if options.dependency_groups:
for req in parse_dependency_groups(options.dependency_groups):
req_to_add = install_req_from_req_string(
req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
)
requirements.append(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
user_supplied=True,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
config_settings=getattr(options, "config_settings", None),
)
requirements.append(req_to_add)
# NOTE: options.require_hashes may be set if --require-hashes is True
for filename in options.requirements:
for parsed_req in parse_requirements(
filename, finder=finder, options=options, session=session
):
req_to_add = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
config_settings=(
parsed_req.options.get("config_settings")
if parsed_req.options
else None
),
)
requirements.append(req_to_add)
# If any requirement has hash options, enable hash checking.
if any(req.has_hash_options for req in requirements):
options.require_hashes = True
if not (
args
or options.editables
or options.requirements
or options.dependency_groups
):
opts = {"name": self.name}
if options.find_links:
raise CommandError(
"You must give at least one requirement to {name} "
'(maybe you meant "pip {name} {links}"?)'.format(
**dict(opts, links=" ".join(options.find_links))
)
)
else:
raise CommandError(
"You must give at least one requirement to {name} "
'(see "pip help {name}")'.format(**opts)
)
return requirements
@staticmethod
def trace_basic_info(finder: PackageFinder) -> None:
"""
Trace basic information about the provided objects.
"""
# Display where finder is looking for packages
search_scope = finder.search_scope
locations = search_scope.get_formatted_locations()
if locations:
logger.info(locations)
def _build_package_finder(
self,
options: Values,
session: PipSession,
target_python: Optional[TargetPython] = None,
ignore_requires_python: Optional[bool] = None,
) -> PackageFinder:
"""
Create a package finder appropriate to this requirement command.
:param ignore_requires_python: Whether to ignore incompatible
"Requires-Python" values in links. Defaults to False.
"""
link_collector = LinkCollector.create(session, options=options)
selection_prefs = SelectionPreferences(
allow_yanked=True,
format_control=options.format_control,
allow_all_prereleases=options.pre,
prefer_binary=options.prefer_binary,
ignore_requires_python=ignore_requires_python,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
target_python=target_python,
)
| RequirementCommand |
python | python-poetry__poetry | tests/conftest.py | {
"start": 4537,
"end": 5837
} | class ____(KeyringBackend):
def __init__(self) -> None:
self._passwords: dict[str, dict[str, str]] = {}
self._service_defaults: dict[str, Credential] = {}
@properties.classproperty
def priority(self) -> float:
return 42
def set_password(self, service: str, username: str, password: str) -> None:
self._passwords[service] = {username: password}
def get_password(self, service: str, username: str) -> str | None:
return self._passwords.get(service, {}).get(username)
def get_credential(
self,
service: str,
username: str | None,
) -> Credential | None:
if username is None:
credential = self._service_defaults.get(service)
return credential
password = self.get_password(service, username)
if password is None:
return None
return SimpleCredential(username, password)
def delete_password(self, service: str, username: str) -> None:
if service in self._passwords and username in self._passwords[service]:
del self._passwords[service][username]
def set_default_service_credential(
self, service: str, credential: Credential
) -> None:
self._service_defaults[service] = credential
| DummyBackend |
python | coleifer__peewee | peewee.py | {
"start": 56129,
"end": 56540
} | class ____(ColumnBase):
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
self._negated = False
@Node.copy
def __invert__(self):
self._negated = not self._negated
def clone(self):
node = DQ(**self.query)
node._negated = self._negated
return node
#: Represent a row tuple.
Tuple = lambda *a: EnclosedNodeList(a)
| DQ |
python | gevent__gevent | src/greentest/3.14/test_smtplib.py | {
"start": 4803,
"end": 4889
} | class ____(GeneralTests, unittest.TestCase):
client = smtplib.SMTP
| SMTPGeneralTests |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore_warm_start.py | {
"start": 10920,
"end": 11593
} | class ____(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = {"width": tune.uniform(0, 20), "height": tune.uniform(-100, 100)}
def cost(space):
for i in range(10):
tune.report(
dict(loss=(space["height"] - 14) ** 2 - abs(space["width"] - 3 - i))
)
search_alg = TuneBOHB(space=space, metric="loss", mode="min", seed=1)
return search_alg, cost
def get_scheduler(self):
return HyperBandForBOHB(max_t=100, metric="loss", mode="min")
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
| BOHBWarmStartTest |
python | urllib3__urllib3 | src/urllib3/_base_connection.py | {
"start": 273,
"end": 478
} | class ____(typing.NamedTuple):
ssl_context: ssl.SSLContext | None
use_forwarding_for_https: bool
assert_hostname: None | str | typing.Literal[False]
assert_fingerprint: str | None
| ProxyConfig |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 31793,
"end": 36353
} | class ____(Exception):
_valClassMapping = dict()
# List of currently known error codes
_errcode_to_string = {
NVML_ERROR_UNINITIALIZED: "Uninitialized",
NVML_ERROR_INVALID_ARGUMENT: "Invalid Argument",
NVML_ERROR_NOT_SUPPORTED: "Not Supported",
NVML_ERROR_NO_PERMISSION: "Insufficient Permissions",
NVML_ERROR_ALREADY_INITIALIZED: "Already Initialized",
NVML_ERROR_NOT_FOUND: "Not Found",
NVML_ERROR_INSUFFICIENT_SIZE: "Insufficient Size",
NVML_ERROR_INSUFFICIENT_POWER: "Insufficient External Power",
NVML_ERROR_DRIVER_NOT_LOADED: "Driver Not Loaded",
NVML_ERROR_TIMEOUT: "Timeout",
NVML_ERROR_IRQ_ISSUE: "Interrupt Request Issue",
NVML_ERROR_LIBRARY_NOT_FOUND: "NVML Shared Library Not Found",
NVML_ERROR_FUNCTION_NOT_FOUND: "Function Not Found",
NVML_ERROR_CORRUPTED_INFOROM: "Corrupted infoROM",
NVML_ERROR_GPU_IS_LOST: "GPU is lost",
NVML_ERROR_RESET_REQUIRED: "GPU requires restart",
NVML_ERROR_OPERATING_SYSTEM: "The operating system has blocked the request.",
NVML_ERROR_LIB_RM_VERSION_MISMATCH: "RM has detected an NVML/RM version mismatch.",
NVML_ERROR_MEMORY: "Insufficient Memory",
NVML_ERROR_UNKNOWN: "Unknown Error",
}
def __new__(typ, value):
'''
Maps value to a proper subclass of NVMLError.
See _extractNVMLErrorsAsClasses function for more details
'''
if typ == NVMLError:
typ = NVMLError._valClassMapping.get(value, typ)
obj = Exception.__new__(typ)
obj.value = value
return obj
def __str__(self):
try:
if self.value not in NVMLError._errcode_to_string:
NVMLError._errcode_to_string[self.value] = str(nvmlErrorString(self.value))
return NVMLError._errcode_to_string[self.value]
except NVMLError:
return "NVML Error with code %d" % self.value
def __eq__(self, other):
return self.value == other.value
def nvmlExceptionClass(nvmlErrorCode):
if nvmlErrorCode not in NVMLError._valClassMapping:
raise ValueError('nvmlErrorCode %s is not valid' % nvmlErrorCode)
return NVMLError._valClassMapping[nvmlErrorCode]
def _extractNVMLErrorsAsClasses():
'''
Generates a hierarchy of classes on top of NVMLError class.
Each NVML Error gets a new NVMLError subclass. This way try,except blocks can filter appropriate
exceptions more easily.
NVMLError is a parent class. Each NVML_ERROR_* gets it's own subclass.
e.g. NVML_ERROR_ALREADY_INITIALIZED will be turned into NVMLError_AlreadyInitialized
'''
this_module = sys.modules[__name__]
nvmlErrorsNames = [x for x in dir(this_module) if x.startswith("NVML_ERROR_")]
for err_name in nvmlErrorsNames:
# e.g. Turn NVML_ERROR_ALREADY_INITIALIZED into NVMLError_AlreadyInitialized
class_name = "NVMLError_" + string.capwords(err_name.replace("NVML_ERROR_", ""), "_").replace("_", "")
err_val = getattr(this_module, err_name)
def gen_new(val):
def new(typ):
obj = NVMLError.__new__(typ, val)
return obj
return new
new_error_class = type(class_name, (NVMLError,), {'__new__': gen_new(err_val)})
new_error_class.__module__ = __name__
setattr(this_module, class_name, new_error_class)
NVMLError._valClassMapping[err_val] = new_error_class
_extractNVMLErrorsAsClasses()
def _nvmlCheckReturn(ret):
if (ret != NVML_SUCCESS):
raise NVMLError(ret)
return ret
## Function access ##
_nvmlGetFunctionPointer_cache = dict() # function pointers are cached to prevent unnecessary libLoadLock locking
def _nvmlGetFunctionPointer(name):
global nvmlLib
if name in _nvmlGetFunctionPointer_cache:
return _nvmlGetFunctionPointer_cache[name]
libLoadLock.acquire()
try:
# ensure library was loaded
if nvmlLib is None:
raise NVMLError(NVML_ERROR_UNINITIALIZED)
try:
_nvmlGetFunctionPointer_cache[name] = getattr(nvmlLib, name)
return _nvmlGetFunctionPointer_cache[name]
except AttributeError:
raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
## Alternative object
# Allows the object to be printed
# Allows mismatched types to be assigned
# - like None when the Structure variant requires c_uint
| NVMLError |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 37774,
"end": 48424
} | class ____(BaseCallbackManager):
"""Callback manager for LangChain."""
def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
run_id: UUID | None = None,
**kwargs: Any,
) -> list[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized: The serialized LLM.
prompts: The list of prompts.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.
Returns:
A callback manager for each prompt as an LLM run.
"""
managers = []
for i, prompt in enumerate(prompts):
# Can't have duplicate runs with the same run ID (if provided)
run_id_ = run_id if i == 0 and run_id is not None else uuid.uuid4()
handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
run_id: UUID | None = None,
**kwargs: Any,
) -> list[CallbackManagerForLLMRun]:
"""Run when chat model starts running.
Args:
serialized: The serialized LLM.
messages: The list of messages.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.
Returns:
A callback manager for each list of messages as an LLM run.
"""
managers = []
for message_list in messages:
if run_id is not None:
run_id_ = run_id
run_id = None
else:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chain_start(
self,
serialized: dict[str, Any] | None,
inputs: dict[str, Any] | Any,
run_id: UUID | None = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized: The serialized chain.
inputs: The inputs to the chain.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.
Returns:
The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@override
def on_tool_start(
self,
serialized: dict[str, Any] | None,
input_str: str,
run_id: UUID | None = None,
parent_run_id: UUID | None = None,
inputs: dict[str, Any] | None = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized: Serialized representation of the tool.
input_str: The input to the tool as a string.
Non-string inputs are cast to strings.
run_id: ID for the run.
parent_run_id: The ID of the parent run.
inputs: The original input to the tool if provided.
Recommended for usage instead of input_str when the original
input is needed.
If provided, the inputs are expected to be formatted as a dict.
The keys will correspond to the named-arguments in the tool.
**kwargs: The keyword arguments to pass to the event handler
Returns:
The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
inputs=inputs,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@override
def on_retriever_start(
self,
serialized: dict[str, Any] | None,
query: str,
run_id: UUID | None = None,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when the retriever starts running.
Args:
serialized: The serialized retriever.
query: The query.
run_id: The ID of the run.
parent_run_id: The ID of the parent run.
**kwargs: Additional keyword arguments.
Returns:
The callback manager for the retriever run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_custom_event(
self,
name: str,
data: Any,
run_id: UUID | None = None,
**kwargs: Any,
) -> None:
"""Dispatch an adhoc event to the handlers (async version).
This event should NOT be used in any internal LangChain code. The event
is meant specifically for users of the library to dispatch custom
events that are tailored to their application.
Args:
name: The name of the adhoc event.
data: The data for the adhoc event.
run_id: The ID of the run.
Raises:
ValueError: If additional keyword arguments are passed.
"""
if not self.handlers:
return
if kwargs:
msg = (
"The dispatcher API does not accept additional keyword arguments."
"Please do not pass any additional keyword arguments, instead "
"include them in the data field."
)
raise ValueError(msg)
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_custom_event",
"ignore_custom_event",
name,
data,
run_id=run_id,
tags=self.tags,
metadata=self.metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False, # noqa: FBT001,FBT002
inheritable_tags: list[str] | None = None,
local_tags: list[str] | None = None,
inheritable_metadata: dict[str, Any] | None = None,
local_metadata: dict[str, Any] | None = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks: The inheritable callbacks.
local_callbacks: The local callbacks.
verbose: Whether to enable verbose mode.
inheritable_tags: The inheritable tags.
local_tags: The local tags.
inheritable_metadata: The inheritable metadata.
local_metadata: The local metadata.
Returns:
The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
verbose=verbose,
)
| CallbackManager |
python | apache__airflow | airflow-core/tests/unit/assets/test_evaluation.py | {
"start": 6161,
"end": 7537
} | class ____:
@pytest.fixture
def asset(self):
"""Example asset links to asset alias resolved_asset_alias_2."""
return Asset(uri="test://asset1/", name="test_name", group="asset")
@pytest.fixture
def asset_alias_1(self):
"""Example asset alias links to no assets."""
return AssetAlias(name="test_name", group="test")
@pytest.fixture
def resolved_asset_alias_2(self):
"""Example asset alias links to asset."""
return AssetAlias(name="test_name_2")
@pytest.fixture
def evaluator(self, session, asset_alias_1, resolved_asset_alias_2, asset):
class _AssetEvaluator(AssetEvaluator): # Can't use mock because AssetEvaluator sets __slots__.
def _resolve_asset_alias(self, o):
if o is asset_alias_1:
return []
if o is resolved_asset_alias_2:
return [asset]
return super()._resolve_asset_alias(o)
return _AssetEvaluator(session)
def test_evaluate_empty(self, evaluator, asset_alias_1, asset):
assert evaluator.run(asset_alias_1, {AssetUniqueKey.from_asset(asset): True}) is False
def test_evalute_resolved(self, evaluator, resolved_asset_alias_2, asset):
assert evaluator.run(resolved_asset_alias_2, {AssetUniqueKey.from_asset(asset): True}) is True
| TestAssetAlias |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/_domain.py | {
"start": 235,
"end": 5045
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene"
_path_str = "layout.scene.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this scene subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this scene subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this scene subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this scene subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this scene subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this scene subplot .
x
Sets the horizontal domain of this scene subplot (in
plot fraction).
y
Sets the vertical domain of this scene subplot (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this scene subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this scene subplot .
x
Sets the horizontal domain of this scene subplot (in
plot fraction).
y
Sets the vertical domain of this scene subplot (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | mkdocs__mkdocs | mkdocs/plugins.py | {
"start": 23912,
"end": 25265
} | class ____(logging.LoggerAdapter):
"""A logger adapter to prefix log messages."""
def __init__(self, prefix: str, logger: logging.Logger) -> None:
"""
Initialize the logger adapter.
Arguments:
prefix: The string to insert in front of every message.
logger: The logger instance.
"""
super().__init__(logger, {})
self.prefix = prefix
def process(self, msg: str, kwargs: MutableMapping[str, Any]) -> tuple[str, Any]:
"""
Process the message.
Arguments:
msg: The message:
kwargs: Remaining arguments.
Returns:
The processed message.
"""
return f"{self.prefix}: {msg}", kwargs
def get_plugin_logger(name: str) -> PrefixedLogger:
"""
Return a logger for plugins.
Arguments:
name: The name to use with `logging.getLogger`.
Returns:
A logger configured to work well in MkDocs,
prefixing each message with the plugin package name.
Example:
```python
from mkdocs.plugins import get_plugin_logger
log = get_plugin_logger(__name__)
log.info("My plugin message")
```
"""
logger = logging.getLogger(f"mkdocs.plugins.{name}")
return PrefixedLogger(name.split(".", 1)[0], logger)
| PrefixedLogger |
python | tiangolo__fastapi | docs_src/schema_extra_example/tutorial003.py | {
"start": 110,
"end": 612
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
@app.put("/items/{item_id}")
async def update_item(
item_id: int,
item: Item = Body(
examples=[
{
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
],
),
):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/bedrock.py | {
"start": 1800,
"end": 3207
} | class ____(AwsBaseSensor[_GenericBedrockHook]):
"""
General sensor behavior for Amazon Bedrock.
Subclasses must implement following methods:
- ``get_state()``
Subclasses must set the following fields:
- ``INTERMEDIATE_STATES``
- ``FAILURE_STATES``
- ``SUCCESS_STATES``
- ``FAILURE_MESSAGE``
:param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore
module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
"""
INTERMEDIATE_STATES: tuple[str, ...] = ()
FAILURE_STATES: tuple[str, ...] = ()
SUCCESS_STATES: tuple[str, ...] = ()
FAILURE_MESSAGE = ""
aws_hook_class: type[_GenericBedrockHook]
ui_color = "#66c3ff"
def __init__(
self,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
):
super().__init__(**kwargs)
self.deferrable = deferrable
def poke(self, context: Context, **kwargs) -> bool:
state = self.get_state()
if state in self.FAILURE_STATES:
raise AirflowException(self.FAILURE_MESSAGE)
return state not in self.INTERMEDIATE_STATES
@abc.abstractmethod
def get_state(self) -> str:
"""Implement in subclasses."""
| BedrockBaseSensor |
python | astropy__astropy | astropy/table/tests/test_masked.py | {
"start": 3614,
"end": 5867
} | class ____(SetupData):
"""Test setting and getting fill value in MaskedColumn and Table"""
def test_init_set_fill_value(self):
"""Check that setting fill_value in the MaskedColumn init works"""
assert self.a.fill_value == 1
c = MaskedColumn(name="c", data=["xxxx", "yyyy"], fill_value="none")
assert c.fill_value == "none"
def test_set_get_fill_value_for_bare_column(self):
"""Check set and get of fill value works for bare Column"""
self.d.fill_value = -999
assert self.d.fill_value == -999
assert np.all(self.d.filled() == [7, -999, 7])
def test_set_get_fill_value_for_str_column(self):
c = MaskedColumn(name="c", data=["xxxx", "yyyy"], mask=[True, False])
# assert np.all(c.filled() == ['N/A', 'yyyy'])
c.fill_value = "ABCDEF"
assert c.fill_value == "ABCD" # string truncated to dtype length
assert np.all(c.filled() == ["ABCD", "yyyy"])
assert np.all(c.filled("XY") == ["XY", "yyyy"])
def test_set_get_fill_value_for_structured_column(self):
assert self.sc.fill_value == np.array((0, -1.0), self.sc.dtype)
sc = self.sc.copy()
assert sc.fill_value.item() == (0, -1.0)
sc.fill_value = (-1, np.inf)
assert sc.fill_value == np.array((-1, np.inf), self.sc.dtype)
sc2 = MaskedColumn(sc, fill_value=(-2, -np.inf))
assert sc2.fill_value == np.array((-2, -np.inf), sc2.dtype)
def test_table_column_mask_not_ref(self):
"""Table column mask is not ref of original column mask"""
self.b.fill_value = -999
assert self.t["b"].fill_value != -999
def test_set_get_fill_value_for_table_column(self):
"""Check set and get of fill value works for Column in a Table"""
self.t["b"].fill_value = 1
assert self.t["b"].fill_value == 1
assert np.all(self.t["b"].filled() == [1, 1, 1])
def test_data_attribute_fill_and_mask(self):
"""Check that .data attribute preserves fill_value and mask"""
self.t["b"].fill_value = 1
self.t["b"].mask = [True, False, True]
assert self.t["b"].data.fill_value == 1
assert np.all(self.t["b"].data.mask == [True, False, True])
| TestFillValue |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 1625,
"end": 2873
} | class ____(RequestHandler, OAuthMixin):
def initialize(self, test, version):
self._OAUTH_VERSION = version
self._OAUTH_REQUEST_TOKEN_URL = test.get_url("/oauth1/server/request_token")
self._OAUTH_AUTHORIZE_URL = test.get_url("/oauth1/server/authorize")
self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/oauth1/server/access_token")
def _oauth_consumer_token(self):
return dict(key="asdf", secret="qwer")
@gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user(
http_client=self.settings["http_client"]
)
if user is None:
raise Exception("user is None")
self.finish(user)
return
yield self.authorize_redirect(http_client=self.settings["http_client"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
if self.get_argument("fail_in_get_user", None):
raise Exception("failing in get_user")
if access_token != dict(key="uiop", secret="5678"):
raise Exception("incorrect access token %r" % access_token)
return dict(email="foo@example.com")
| OAuth1ClientLoginHandler |
python | pytorch__pytorch | torch/_inductor/runtime/autotune_cache.py | {
"start": 11174,
"end": 14808
} | class ____:
"""
Caches a set of LocalAutotuneCacheBackend entries together in a single
cache.
"""
_key: str
_cache: RemoteCache[JsonDataTy]
# All known entries from LocalAutotuneCache.put()
_entries: dict[str, JsonDataTy]
def end_compile(self) -> None:
# TODO: Do we need to compute time_taken_ms and encode that somehow?
if self._entries:
self._cache.put(self._key, self._entries)
def put(self, basename: str, data: JsonDataTy) -> None:
# Do we need to worry about duplicates? We only have a single local fs
# entry - so probably not.
self._entries[basename] = data
def __init__(self, key: str, cache: RemoteCache[JsonDataTy]) -> None:
self._key = key
self._cache = cache
self._entries = {}
def sync(self) -> None:
# We don't currently use this - but we could async load starting at
# `begin_compile` and wait for the load to be finished here.
pass
@classmethod
def _should_use_bundled_autotune_remote_cache(
cls, inductor_meta: _InductorMetaTy
) -> bool:
# The bundled autotune cache is only available if you've also got local
# caching enabled (because we feed the bundled data to the local cache).
if not inductor_meta.get("autotune_local_cache", True):
return False
# Check if the we're enabled via config
if (
bundled_autotune_remote_cache := inductor_meta.get(
"bundled_autotune_remote_cache"
)
) is not None:
return bool(bundled_autotune_remote_cache)
if not cls._get_is_fbcode(inductor_meta):
return False
if torch._utils_internal.is_fb_unit_test():
return False
if inductor_meta.get("is_hip"):
return False
try:
from torch._inductor.fb.remote_cache import REMOTE_CACHE_VERSION
except ModuleNotFoundError:
return False
jk = torch._utils_internal.justknobs_getval_int(
"pytorch/remote_cache:bundled_autotune_remote_cache_version"
)
return REMOTE_CACHE_VERSION >= jk
def _load_cache(self) -> bool:
from torch._inductor import codecache
# The single key is defined on construction of the cache.
entries = self._cache.get(self._key)
if entries is None or not isinstance(entries, dict):
# We couldn't load the cache - so mark _entries as non-None so we
# store local cache values.
return False
# Go through the entries we got from the cache and save them locally.
time_saved_ns = 0
for basename, data in entries.items():
# Reconstruct the final filename (see put())
root, ext = _splitext_nodot(basename)
_, _, filename = codecache.get_path(root, ext)
if isinstance(data, dict) and (tsns := data.get("time_saved_ns")):
time_saved_ns += int(tsns) # type: ignore[arg-type]
local_cache = LocalAutotuneCache()
local_cache.put(filename, data)
codecache.add_ephemeral_timeout_increase_for_distributed(time_saved_ns)
return True
@staticmethod
def _get_is_fbcode(inductor_meta: _InductorMetaTy) -> bool:
return bool(inductor_meta.get("is_fbcode", False))
@staticmethod
def _get_backend_hash(inductor_meta: _InductorMetaTy) -> str:
backend_hash = inductor_meta["backend_hash"]
assert isinstance(backend_hash, str)
return backend_hash
| _AutotuneCacheBundlerImpl |
python | PrefectHQ__prefect | src/prefect/utilities/importtools.py | {
"start": 4981,
"end": 7598
} | class ____(ModuleType):
"""
A fake module returned by `lazy_import` when the module cannot be found. When any
of the module's attributes are accessed, we will throw a `ModuleNotFoundError`.
Adapted from [lazy_loader][1]
[1]: https://github.com/scientific-python/lazy_loader
"""
def __init__(self, error_message: str, help_message: Optional[str] = None) -> None:
self.__error_message = error_message
if not help_message:
help_message = "Import errors for this module are only reported when used."
super().__init__("DelayedImportErrorModule", help_message)
def __getattr__(self, attr: str) -> Any:
if attr == "__file__": # not set but should result in an attribute error?
return super().__getattr__(attr)
raise ModuleNotFoundError(self.__error_message)
def lazy_import(
name: str, error_on_import: bool = False, help_message: Optional[str] = None
) -> ModuleType:
"""
Create a lazily-imported module to use in place of the module of the given name.
Use this to retain module-level imports for libraries that we don't want to
actually import until they are needed.
NOTE: Lazy-loading a subpackage can cause the subpackage to be imported
twice if another non-lazy import also imports the subpackage. For example,
using both `lazy_import("docker.errors")` and `import docker.errors` in the
same codebase will import `docker.errors` twice and can lead to unexpected
behavior, e.g. type check failures and import-time side effects running
twice.
Adapted from the [Python documentation][1] and [lazy_loader][2]
[1]: https://docs.python.org/3/library/importlib.html#implementing-lazy-imports
[2]: https://github.com/scientific-python/lazy_loader
"""
try:
return sys.modules[name]
except KeyError:
pass
if "." in name:
warnings.warn(
"Lazy importing subpackages can lead to unexpected behavior.",
RuntimeWarning,
)
spec = importlib.util.find_spec(name)
if spec is None:
import_error_message = f"No module named '{name}'.\n{help_message}"
if error_on_import:
raise ModuleNotFoundError(import_error_message)
return DelayedImportErrorModule(import_error_message, help_message)
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module
if TYPE_CHECKING:
assert spec.loader is not None
loader = importlib.util.LazyLoader(spec.loader)
loader.exec_module(module)
return module
| DelayedImportErrorModule |
python | miyuchina__mistletoe | mistletoe/contrib/pygments_renderer.py | {
"start": 290,
"end": 1759
} | class ____(HtmlRenderer):
formatter = HtmlFormatter()
formatter.noclasses = True
def __init__(self, *extras, style='default', fail_on_unsupported_language=False, **kwargs):
"""
Args:
extras (list): allows subclasses to add even more custom tokens.
style (str): short name of the style to be used by Pygments' `HtmlFormatter`,
see `pygments.styles.get_style_by_name()`.
fail_on_unsupported_language (bool): whether to let Pygments' `ClassNotFound`
be thrown when there is an unsupported language found on
a code block.
If `False`, then language is guessed instead of throwing the error.
**kwargs: additional parameters to be passed to the ancestor's
constructor.
"""
super().__init__(*extras, **kwargs)
self.formatter.style = get_style(style)
self.fail_on_unsupported_language = fail_on_unsupported_language
def render_block_code(self, token):
code = token.content
lexer = None
if token.language:
try:
lexer = get_lexer(token.language)
except ClassNotFound as err:
if self.fail_on_unsupported_language:
raise err
if lexer is None:
lexer = guess_lexer(code)
return highlight(code, lexer, self.formatter)
| PygmentsRenderer |
python | eventlet__eventlet | tests/db_pool_test.py | {
"start": 310,
"end": 1394
} | class ____:
__test__ = False # so that nose doesn't try to execute this directly
def setUp(self):
self.create_db()
self.connection = None
connection = self._dbmodule.connect(**self._auth)
cursor = connection.cursor()
cursor.execute("""CREATE TABLE gargleblatz
(
a INTEGER
);""")
connection.commit()
cursor.close()
connection.close()
def tearDown(self):
if self.connection:
self.connection.close()
self.drop_db()
def set_up_dummy_table(self, connection=None):
close_connection = False
if connection is None:
close_connection = True
if self.connection is None:
connection = self._dbmodule.connect(**self._auth)
else:
connection = self.connection
cursor = connection.cursor()
cursor.execute(self.dummy_table_sql)
connection.commit()
cursor.close()
if close_connection:
connection.close()
# silly mock class
| DBTester |
python | xlwings__xlwings | xlwings/expansion.py | {
"start": 494,
"end": 1269
} | class ____(Expander):
def expand(self, rng):
origin = rng(1, 1)
if origin.has_array:
bottom_left = origin.end("down")
elif origin(2, 1).raw_value in _empty:
bottom_left = origin
elif origin(3, 1).raw_value in _empty:
bottom_left = origin(2, 1)
else:
bottom_left = origin(2, 1).end("down")
if origin.has_array:
top_right = origin.end("right")
elif origin(1, 2).raw_value in _empty:
top_right = origin
elif origin(1, 3).raw_value in _empty:
top_right = origin(1, 2)
else:
top_right = origin(1, 2).end("right")
return Range(top_right, bottom_left)
TableExpander().register("table")
| TableExpander |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 109637,
"end": 110304
} | class ____(FunctorchHigherOrderVariable):
def call_function(
self, tx, args: list[VariableTracker], kwargs: dict[str, VariableTracker]
) -> VariableTracker:
if not torch._dynamo.config.inline_inbuilt_nn_modules:
unimplemented(
gb_type="torch.func.functional_call capture is disabled",
context="",
explanation="torch.func.functional_call capture is disabled",
hints=[
"Set `torch._dynamo.config.inline_inbuilt_nn_modules=True` to enable.",
],
)
return super().call_function(tx, args, kwargs)
| FunctionalCallVariable |
python | joke2k__faker | faker/providers/person/__init__.py | {
"start": 113,
"end": 9525
} | class ____(BaseProvider):
formats: ElementsType[str] = ["{{first_name}} {{last_name}}"]
first_names: ElementsType[str] = ["John", "Jane"]
last_names: ElementsType[str] = ["Doe"]
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
language_names: ElementsType[str] = [
"Afar",
"Abkhazian",
"Avestan",
"Afrikaans",
"Akan",
"Amharic",
"Aragonese",
"Arabic",
"Assamese",
"Avaric",
"Aymara",
"Azerbaijani",
"Bashkir",
"Belarusian",
"Bulgarian",
"Bihari languages",
"Bislama",
"Bambara",
"Bengali",
"Tibetan",
"Breton",
"Bosnian",
"Catalan",
"Chechen",
"Chamorro",
"Corsican",
"Cree",
"Czech",
"Church Slavic",
"Chuvash",
"Welsh",
"Danish",
"German",
"Divehi",
"Dzongkha",
"Ewe",
"Greek",
"English",
"Esperanto",
"Spanish",
"Estonian",
"Basque",
"Persian",
"Fulah",
"Finnish",
"Fijian",
"Faroese",
"French",
"Western Frisian",
"Irish",
"Gaelic",
"Galician",
"Guarani",
"Gujarati",
"Manx",
"Hausa",
"Hebrew",
"Hindi",
"Hiri Motu",
"Croatian",
"Haitian",
"Hungarian",
"Armenian",
"Herero",
"Interlingua",
"Indonesian",
"Interlingue",
"Igbo",
"Sichuan Yi",
"Inupiaq",
"Ido",
"Icelandic",
"Italian",
"Inuktitut",
"Japanese",
"Javanese",
"Georgian",
"Kongo",
"Kikuyu",
"Kuanyama",
"Kazakh",
"Kalaallisut",
"Central Khmer",
"Kannada",
"Korean",
"Kanuri",
"Kashmiri",
"Kurdish",
"Komi",
"Cornish",
"Kirghiz",
"Latin",
"Luxembourgish",
"Ganda",
"Limburgan",
"Lingala",
"Lao",
"Lithuanian",
"Luba-Katanga",
"Latvian",
"Malagasy",
"Marshallese",
"Maori",
"Macedonian",
"Malayalam",
"Mongolian",
"Marathi",
"Malay",
"Maltese",
"Burmese",
"Nauru",
"North Ndebele",
"Nepali",
"Ndonga",
"Dutch",
"Norwegian Nynorsk",
"Norwegian",
"South Ndebele",
"Navajo",
"Chichewa",
"Occitan",
"Ojibwa",
"Oromo",
"Oriya",
"Ossetian",
"Panjabi",
"Pali",
"Polish",
"Pushto",
"Portuguese",
"Quechua",
"Romansh",
"Rundi",
"Romanian",
"Russian",
"Kinyarwanda",
"Sanskrit",
"Sardinian",
"Sindhi",
"Northern Sami",
"Sango",
"Sinhala",
"Slovak",
"Slovenian",
"Samoan",
"Shona",
"Somali",
"Albanian",
"Serbian",
"Swati",
"Sotho, Southern",
"Sundanese",
"Swedish",
"Swahili",
"Tamil",
"Telugu",
"Tajik",
"Thai",
"Tigrinya",
"Turkmen",
"Tagalog",
"Tswana",
"Tonga",
"Turkish",
"Tsonga",
"Tatar",
"Twi",
"Tahitian",
"Uighur",
"Ukrainian",
"Urdu",
"Uzbek",
"Venda",
"Vietnamese",
"Walloon",
"Wolof",
"Xhosa",
"Yiddish",
"Yoruba",
"Zhuang",
"Chinese",
"Zulu",
]
def name(self) -> str:
"""
:example: 'John Doe'
"""
pattern: str = self.random_element(self.formats)
return self.generator.parse(pattern)
def first_name(self) -> str:
return self.random_element(self.first_names)
def last_name(self) -> str:
return self.random_element(self.last_names)
def name_male(self) -> str:
if hasattr(self, "formats_male"):
formats = self.formats_male # type: ignore[attr-defined]
else:
formats = self.formats
pattern: str = self.random_element(formats)
return self.generator.parse(pattern)
def name_nonbinary(self) -> str:
if hasattr(self, "formats_nonbinary"):
formats = self.formats_nonbinary # type: ignore[attr-defined]
else:
formats = self.formats
pattern: str = self.random_element(formats)
return self.generator.parse(pattern)
def name_female(self) -> str:
if hasattr(self, "formats_female"):
formats = self.formats_female # type: ignore[attr-defined]
else:
formats = self.formats
pattern: str = self.random_element(formats)
return self.generator.parse(pattern)
def first_name_male(self) -> str:
if hasattr(self, "first_names_male"):
return self.random_element(self.first_names_male) # type: ignore[attr-defined]
return self.first_name()
def first_name_nonbinary(self) -> str:
if hasattr(self, "first_names_nonbinary"):
return self.random_element(self.first_names_nonbinary) # type: ignore[attr-defined]
return self.first_name()
def first_name_female(self) -> str:
if hasattr(self, "first_names_female"):
return self.random_element(self.first_names_female) # type: ignore[attr-defined]
return self.first_name()
def last_name_male(self) -> str:
if hasattr(self, "last_names_male"):
return self.random_element(self.last_names_male) # type: ignore[attr-defined]
return self.last_name()
def last_name_nonbinary(self) -> str:
if hasattr(self, "last_names_nonbinary"):
return self.random_element(self.last_names_nonbinary) # type: ignore[attr-defined]
return self.last_name()
def last_name_female(self) -> str:
if hasattr(self, "last_names_female"):
return self.random_element(self.last_names_female) # type: ignore[attr-defined]
return self.last_name()
def prefix(self) -> str:
if hasattr(self, "prefixes"):
return self.random_element(self.prefixes) # type: ignore[attr-defined]
if hasattr(self, "prefixes_male") and hasattr(self, "prefixes_female") and hasattr(self, "prefixes_nonbinary"):
prefixes = add_ordereddicts(
self.prefixes_male, # type: ignore[attr-defined]
self.prefixes_female, # type: ignore[attr-defined]
self.prefixes_nonbinary, # type: ignore[attr-defined]
)
return self.random_element(prefixes)
if hasattr(self, "prefixes_male") and hasattr(self, "prefixes_female"):
prefixes = self.random_element((self.prefixes_male, self.prefixes_female)) # type: ignore[attr-defined]
return self.random_element(prefixes)
return ""
def prefix_male(self) -> str:
if hasattr(self, "prefixes_male"):
return self.random_element(self.prefixes_male) # type: ignore[attr-defined]
return self.prefix()
def prefix_nonbinary(self) -> str:
if hasattr(self, "prefixes_nonbinary"):
return self.random_element(self.prefixes_nonbinary) # type: ignore[attr-defined]
return self.prefix()
def prefix_female(self) -> str:
if hasattr(self, "prefixes_female"):
return self.random_element(self.prefixes_female) # type: ignore[attr-defined]
return self.prefix()
def suffix(self) -> str:
if hasattr(self, "suffixes"):
return self.random_element(self.suffixes) # type: ignore[attr-defined]
if hasattr(self, "suffixes_male") and hasattr(self, "suffixes_female") and hasattr(self, "suffixes_nonbinary"):
suffixes = add_ordereddicts(
self.suffixes_male, # type: ignore[attr-defined]
self.suffixes_female, # type: ignore[attr-defined]
self.suffixes_nonbinary, # type: ignore[attr-defined]
)
return self.random_element(suffixes)
if hasattr(self, "suffixes_male") and hasattr(self, "suffixes_female"):
suffixes = self.random_element((self.suffixes_male, self.suffixes_female)) # type: ignore[attr-defined]
return self.random_element(suffixes)
return ""
def suffix_male(self) -> str:
if hasattr(self, "suffixes_male"):
return self.random_element(self.suffixes_male) # type: ignore[attr-defined]
return self.suffix()
def suffix_nonbinary(self) -> str:
if hasattr(self, "suffixes_nonbinary"):
return self.random_element(self.suffixes_nonbinary) # type: ignore[attr-defined]
return self.suffix()
def suffix_female(self) -> str:
if hasattr(self, "suffixes_female"):
return self.random_element(self.suffixes_female) # type: ignore[attr-defined]
return self.suffix()
def language_name(self) -> str:
"""Generate a random i18n language name (e.g. English)."""
return self.random_element(self.language_names)
| Provider |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 15447,
"end": 15826
} | class ____(PointEvent):
''' Announce a press event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'press'
| Press |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 137834,
"end": 138981
} | class ____(Response):
"""
Response of projects.make_private endpoint.
:param updated: Number of projects updated
:type updated: int
"""
_service = "projects"
_action = "make_private"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of projects updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePrivateResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| MakePrivateResponse |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 43232,
"end": 43916
} | class ____:
"""
Entry type for the FakeTensor dispatch cache for an output. Accounts for three
possibilities:
1) The op is inplace, and a hit means we need to alias the argument at a
given index.
2) We need to synthesize a new FakeTensor given tensor metadata. For view
ops, we further capture the index of the arg to alias.
3) if the tensor related fields are None, then it is a constant value (e.g.
None or integer)
"""
inplace_idx: Optional[int]
metadata: Optional[TensorMetadata]
view_idx: Optional[int]
constant_value: Optional[Any] = SingletonConstant
@dataclass(frozen=True, slots=True)
| _DispatchCacheEntryOutputInfo |
python | getsentry__sentry | src/sentry/quotas/base.py | {
"start": 8835,
"end": 9184
} | class ____(RateLimit):
def __init__(self, **kwargs):
super().__init__(True, **kwargs)
def _limit_from_settings(x: Any) -> int | None:
"""
limit=0 (or any falsy value) in database means "no limit". Convert that to
limit=None as limit=0 in code means "reject all".
"""
return int(x or 0) or None
@dataclass
| RateLimited |
python | python-openxml__python-docx | tests/opc/test_pkgreader.py | {
"start": 14117,
"end": 14766
} | class ____:
def it_remembers_construction_values(self):
# test data --------------------
partname = "/part/name.xml"
content_type = "app/vnd.type"
reltype = "http://rel/type"
blob = "<Part/>"
srels = "srels proxy"
# exercise ---------------------
spart = _SerializedPart(partname, content_type, reltype, blob, srels)
# verify -----------------------
assert spart.partname == partname
assert spart.content_type == content_type
assert spart.reltype == reltype
assert spart.blob == blob
assert spart.srels == srels
| Describe_SerializedPart |
python | django__django | tests/cache/tests.py | {
"start": 73555,
"end": 74606
} | class ____(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_only_initialized(self):
with self.settings(
CACHES={
"cache_1": {
"BACKEND": "cache.closeable_cache.CacheClass",
},
"cache_2": {
"BACKEND": "cache.closeable_cache.CacheClass",
},
}
):
self.assertEqual(caches.all(initialized_only=True), [])
signals.request_finished.send(self.__class__)
self.assertEqual(caches.all(initialized_only=True), [])
DEFAULT_MEMORY_CACHES_SETTINGS = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "unique-snowflake",
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS["default"]["TIMEOUT"] = None
| CacheClosingTests |
python | django__django | tests/i18n/patterns/tests.py | {
"start": 14740,
"end": 16797
} | class ____(URLTestCaseBase):
"""Tests if the response has the correct language code."""
def test_not_prefixed_with_prefix(self):
response = self.client.get("/en/not-prefixed/")
self.assertEqual(response.status_code, 404)
def test_en_url(self):
response = self.client.get("/en/account/register/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers["content-language"], "en")
self.assertEqual(response.context["LANGUAGE_CODE"], "en")
def test_nl_url(self):
response = self.client.get("/nl/profiel/registreren/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers["content-language"], "nl")
self.assertEqual(response.context["LANGUAGE_CODE"], "nl")
def test_wrong_en_prefix(self):
response = self.client.get("/en/profiel/registreren/")
self.assertEqual(response.status_code, 404)
def test_wrong_nl_prefix(self):
response = self.client.get("/nl/account/register/")
self.assertEqual(response.status_code, 404)
def test_pt_br_url(self):
response = self.client.get("/pt-br/conta/registre-se/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers["content-language"], "pt-br")
self.assertEqual(response.context["LANGUAGE_CODE"], "pt-br")
def test_en_path(self):
response = self.client.get("/en/account/register-as-path/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers["content-language"], "en")
self.assertEqual(response.context["LANGUAGE_CODE"], "en")
def test_nl_path(self):
response = self.client.get("/nl/profiel/registreren-als-pad/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers["content-language"], "nl")
self.assertEqual(response.context["LANGUAGE_CODE"], "nl")
@override_settings(ROOT_URLCONF="i18n.urls_default_unprefixed", LANGUAGE_CODE="nl")
| URLResponseTests |
python | ipython__ipython | tests/test_pretty.py | {
"start": 1176,
"end": 1240
} | class ____(object):
pass
NoModule.__module__ = None
| NoModule |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_baseexception.py | {
"start": 7331,
"end": 9627
} | class ____(__TestCase):
"""Test usage of exceptions"""
def raise_fails(self, object_):
"""Make sure that raising 'object_' triggers a TypeError."""
try:
raise object_
except TypeError:
return # What is expected.
self.fail("TypeError expected for raising %s" % type(object_))
def catch_fails(self, object_):
"""Catching 'object_' should raise a TypeError."""
try:
try:
raise Exception
except object_:
pass
except TypeError:
pass
except Exception:
self.fail("TypeError expected when catching %s" % type(object_))
try:
try:
raise Exception
except (object_,):
pass
except TypeError:
return
except Exception:
self.fail("TypeError expected when catching %s as specified in a "
"tuple" % type(object_))
def test_raise_new_style_non_exception(self):
# You cannot raise a new-style class that does not inherit from
# BaseException; the ability was not possible until BaseException's
# introduction so no need to support new-style objects that do not
# inherit from it.
with torch._dynamo.error_on_graph_break(False):
class NewStyleClass(object):
pass
self.raise_fails(NewStyleClass)
self.raise_fails(NewStyleClass())
def test_raise_string(self):
# Raising a string raises TypeError.
self.raise_fails("spam")
def test_catch_non_BaseException(self):
# Trying to catch an object that does not inherit from BaseException
# is not allowed.
with torch._dynamo.error_on_graph_break(False):
class NonBaseException(object):
pass
self.catch_fails(NonBaseException)
self.catch_fails(NonBaseException())
def test_catch_BaseException_instance(self):
# Catching an instance of a BaseException subclass won't work.
self.catch_fails(BaseException())
def test_catch_string(self):
# Catching a string is bad.
self.catch_fails("spam")
if __name__ == "__main__":
run_tests()
| UsageTests |
python | kamyu104__LeetCode-Solutions | Python/closest-room.py | {
"start": 95,
"end": 1263
} | class ____(object):
def closestRoom(self, rooms, queries):
"""
:type rooms: List[List[int]]
:type queries: List[List[int]]
:rtype: List[int]
"""
def find_closest(ids, r):
result, min_dist = -1, float("inf")
i = ids.bisect_right(r)
if i-1 >= 0 and abs(ids[i-1]-r) < min_dist:
min_dist = abs(ids[i-1]-r)
result = ids[i-1]
if i < len(ids) and abs(ids[i]-r) < min_dist:
min_dist = abs(ids[i]-r)
result = ids[i]
return result
rooms.sort(key=lambda x: x[1], reverse=True)
for i, q in enumerate(queries):
q.append(i)
queries.sort(key=lambda x: x[1], reverse=True)
ids = SortedList()
i = 0
result = [-1]*len(queries)
for r, s, idx in queries:
while i < len(rooms) and rooms[i][1] >= s:
ids.add(rooms[i][0])
i += 1
result[idx] = find_closest(ids, r)
return result
# Time: O(nlogn + klogk + klogn)
# Space: O(n + k)
from sortedcontainers import SortedList
| Solution |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/clients/test_web_application.py | {
"start": 498,
"end": 12569
} | class ____(TestCase):
client_id = "someclientid"
client_secret = 'someclientsecret'
uri = "https://example.com/path?query=world"
uri_id = uri + "&response_type=code&client_id=" + client_id
uri_redirect = uri_id + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
redirect_uri = "http://my.page.com/callback"
code_verifier = "code_verifier"
scope = ["/profile"]
state = "xyz"
code_challenge = "code_challenge"
code_challenge_method = "S256"
uri_scope = uri_id + "&scope=%2Fprofile"
uri_state = uri_id + "&state=" + state
uri_code_challenge = uri_id + "&code_challenge=" + code_challenge + "&code_challenge_method=" + code_challenge_method
uri_code_challenge_method = uri_id + "&code_challenge=" + code_challenge + "&code_challenge_method=plain"
kwargs = {
"some": "providers",
"require": "extra arguments"
}
uri_kwargs = uri_id + "&some=providers&require=extra+arguments"
uri_authorize_code = uri_redirect + "&scope=%2Fprofile&state=" + state
code = "zzzzaaaa"
body = "not=empty"
body_code = "not=empty&grant_type=authorization_code&code={}&client_id={}".format(code, client_id)
body_redirect = body_code + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
body_code_verifier = body_code + "&code_verifier=code_verifier"
body_kwargs = body_code + "&some=providers&require=extra+arguments"
response_uri = "https://client.example.com/cb?code=zzzzaaaa&state=xyz"
response = {"code": "zzzzaaaa", "state": "xyz"}
token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_in":3600,'
' "scope":"/profile",'
' "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter":"example_value"}')
token = {
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"expires_at": 4600,
"scope": scope,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
}
def test_auth_grant_uri(self):
client = WebApplicationClient(self.client_id)
# Basic, no extra arguments
uri = client.prepare_request_uri(self.uri)
self.assertURLEqual(uri, self.uri_id)
# With redirection uri
uri = client.prepare_request_uri(self.uri, redirect_uri=self.redirect_uri)
self.assertURLEqual(uri, self.uri_redirect)
# With scope
uri = client.prepare_request_uri(self.uri, scope=self.scope)
self.assertURLEqual(uri, self.uri_scope)
# With state
uri = client.prepare_request_uri(self.uri, state=self.state)
self.assertURLEqual(uri, self.uri_state)
# with code_challenge and code_challenge_method
uri = client.prepare_request_uri(self.uri, code_challenge=self.code_challenge, code_challenge_method=self.code_challenge_method)
self.assertURLEqual(uri, self.uri_code_challenge)
# with no code_challenge_method
uri = client.prepare_request_uri(self.uri, code_challenge=self.code_challenge)
self.assertURLEqual(uri, self.uri_code_challenge_method)
# With extra parameters through kwargs
uri = client.prepare_request_uri(self.uri, **self.kwargs)
self.assertURLEqual(uri, self.uri_kwargs)
def test_request_body(self):
client = WebApplicationClient(self.client_id, code=self.code)
# Basic, no extra arguments
body = client.prepare_request_body(body=self.body)
self.assertFormBodyEqual(body, self.body_code)
rclient = WebApplicationClient(self.client_id)
body = rclient.prepare_request_body(code=self.code, body=self.body)
self.assertFormBodyEqual(body, self.body_code)
# With redirection uri
body = client.prepare_request_body(body=self.body, redirect_uri=self.redirect_uri)
self.assertFormBodyEqual(body, self.body_redirect)
# With code verifier
body = client.prepare_request_body(body=self.body, code_verifier=self.code_verifier)
self.assertFormBodyEqual(body, self.body_code_verifier)
# With extra parameters
body = client.prepare_request_body(body=self.body, **self.kwargs)
self.assertFormBodyEqual(body, self.body_kwargs)
def test_parse_grant_uri_response(self):
client = WebApplicationClient(self.client_id)
# Parse code and state
response = client.parse_request_uri_response(self.response_uri, state=self.state)
self.assertEqual(response, self.response)
self.assertEqual(client.code, self.code)
# Mismatching state
self.assertRaises(errors.MismatchingStateError,
client.parse_request_uri_response,
self.response_uri,
state="invalid")
def test_populate_attributes(self):
client = WebApplicationClient(self.client_id)
response_uri = (self.response_uri +
"&access_token=EVIL-TOKEN"
"&refresh_token=EVIL-TOKEN"
"&mac_key=EVIL-KEY")
client.parse_request_uri_response(response_uri, self.state)
self.assertEqual(client.code, self.code)
# We must not accidentally pick up any further security
# credentials at this point.
self.assertIsNone(client.access_token)
self.assertIsNone(client.refresh_token)
self.assertIsNone(client.mac_key)
def test_parse_token_response(self):
client = WebApplicationClient(self.client_id)
# Parse code and state
response = client.parse_request_body_response(self.token_json, scope=self.scope)
self.assertEqual(response, self.token)
self.assertEqual(client.access_token, response.get("access_token"))
self.assertEqual(client.refresh_token, response.get("refresh_token"))
self.assertEqual(client.token_type, response.get("token_type"))
# Mismatching state
self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
token = client.parse_request_body_response(self.token_json, scope="invalid")
self.assertTrue(token.scope_changed)
scope_changes_recorded = []
def record_scope_change(sender, message, old, new):
scope_changes_recorded.append((message, old, new))
signals.scope_changed.connect(record_scope_change)
try:
client.parse_request_body_response(self.token_json, scope="invalid")
self.assertEqual(len(scope_changes_recorded), 1)
message, old, new = scope_changes_recorded[0]
self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
self.assertEqual(old, ['invalid'])
self.assertEqual(new, ['/profile'])
finally:
signals.scope_changed.disconnect(record_scope_change)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
def test_prepare_authorization_requeset(self):
client = WebApplicationClient(self.client_id)
url, header, body = client.prepare_authorization_request(
self.uri, redirect_url=self.redirect_uri, state=self.state, scope=self.scope)
self.assertURLEqual(url, self.uri_authorize_code)
# verify default header and body only
self.assertEqual(header, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(body, '')
def test_prepare_request_body(self):
"""
see issue #585
https://github.com/oauthlib/oauthlib/issues/585
`prepare_request_body` should support the following scenarios:
1. Include client_id alone in the body (default)
2. Include client_id and client_secret in auth and not include them in the body (RFC preferred solution)
3. Include client_id and client_secret in the body (RFC alternative solution)
4. Include client_id in the body and an empty string for client_secret.
"""
client = WebApplicationClient(self.client_id)
# scenario 1, default behavior to include `client_id`
r1 = client.prepare_request_body()
self.assertEqual(r1, 'grant_type=authorization_code&client_id=%s' % self.client_id)
r1b = client.prepare_request_body(include_client_id=True)
self.assertEqual(r1b, 'grant_type=authorization_code&client_id=%s' % self.client_id)
# scenario 2, do not include `client_id` in the body, so it can be sent in auth.
r2 = client.prepare_request_body(include_client_id=False)
self.assertEqual(r2, 'grant_type=authorization_code')
# scenario 3, Include client_id and client_secret in the body (RFC alternative solution)
# the order of kwargs being appended is not guaranteed. for brevity, check the 2 permutations instead of sorting
r3 = client.prepare_request_body(client_secret=self.client_secret)
r3_params = dict(urlparse.parse_qsl(r3, keep_blank_values=True))
self.assertEqual(len(r3_params.keys()), 3)
self.assertEqual(r3_params['grant_type'], 'authorization_code')
self.assertEqual(r3_params['client_id'], self.client_id)
self.assertEqual(r3_params['client_secret'], self.client_secret)
r3b = client.prepare_request_body(include_client_id=True, client_secret=self.client_secret)
r3b_params = dict(urlparse.parse_qsl(r3b, keep_blank_values=True))
self.assertEqual(len(r3b_params.keys()), 3)
self.assertEqual(r3b_params['grant_type'], 'authorization_code')
self.assertEqual(r3b_params['client_id'], self.client_id)
self.assertEqual(r3b_params['client_secret'], self.client_secret)
# scenario 4, `client_secret` is an empty string
r4 = client.prepare_request_body(include_client_id=True, client_secret='')
r4_params = dict(urlparse.parse_qsl(r4, keep_blank_values=True))
self.assertEqual(len(r4_params.keys()), 3)
self.assertEqual(r4_params['grant_type'], 'authorization_code')
self.assertEqual(r4_params['client_id'], self.client_id)
self.assertEqual(r4_params['client_secret'], '')
# scenario 4b, `client_secret` is `None`
r4b = client.prepare_request_body(include_client_id=True, client_secret=None)
r4b_params = dict(urlparse.parse_qsl(r4b, keep_blank_values=True))
self.assertEqual(len(r4b_params.keys()), 2)
self.assertEqual(r4b_params['grant_type'], 'authorization_code')
self.assertEqual(r4b_params['client_id'], self.client_id)
# scenario Warnings
# warning1 - raise a DeprecationWarning if a `client_id` is submitted
with self.assertWarns(DeprecationWarning):
client.prepare_request_body(client_id=self.client_id)
# testing the exact warning message in Python2&Python3 is a pain
# scenario Exceptions
# exception1 - raise a ValueError if the a different `client_id` is submitted
with self.assertWarns(DeprecationWarning), self.assertRaises(ValueError):
client.prepare_request_body(client_id='different_client_id')
# testing the exact exception message in Python2&Python3 is a pain
def test_expires_in_as_str(self):
"""
see regression issue #906
"""
client = WebApplicationClient(
client_id="dummy",
token={"access_token": "xyz", "expires_in": "3600"}
)
self.assertIsNotNone(client)
client = WebApplicationClient(
client_id="dummy",
token={"access_token": "xyz", "expires_in": 3600}
)
self.assertIsNotNone(client)
client = WebApplicationClient(
client_id="dummy",
token={"access_token": "xyz", "expires_in": 3600.12}
)
self.assertIsNotNone(client)
| WebApplicationClientTest |
python | kamyu104__LeetCode-Solutions | Python/broken-calculator.py | {
"start": 32,
"end": 350
} | class ____(object):
def brokenCalc(self, X, Y):
"""
:type X: int
:type Y: int
:rtype: int
"""
result = 0
while X < Y:
if Y%2:
Y += 1
else:
Y /= 2
result += 1
return result + X-Y
| Solution |
python | ipython__ipython | IPython/core/logger.py | {
"start": 1015,
"end": 8436
} | class ____:
"""A Logfile class with different policies for file creation"""
def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
logmode='over'):
# this is the full ipython instance, we need some attributes from it
# which won't exist until later. What a mess, clean up later...
self.home_dir = home_dir
self.logfname = logfname
self.loghead = loghead
self.logmode = logmode
self.logfile = None
# Whether to log raw or processed input
self.log_raw_input = False
# whether to also log output
self.log_output = False
# whether to put timestamps before each log entry
self.timestamp = False
# activity control flags
self.log_active = False
# logmode is a validated property
def _set_mode(self,mode):
if mode not in ['append','backup','global','over','rotate']:
raise ValueError('invalid log mode %s given' % mode)
self._logmode = mode
def _get_mode(self):
return self._logmode
logmode = property(_get_mode,_set_mode)
def logstart(self, logfname=None, loghead=None, logmode=None,
log_output=False, timestamp=False, log_raw_input=False):
"""Generate a new log-file with a default header.
Raises RuntimeError if the log has already been started"""
if self.logfile is not None:
raise RuntimeError('Log file is already active: %s' %
self.logfname)
# The parameters can override constructor defaults
if logfname is not None: self.logfname = logfname
if loghead is not None: self.loghead = loghead
if logmode is not None: self.logmode = logmode
# Parameters not part of the constructor
self.timestamp = timestamp
self.log_output = log_output
self.log_raw_input = log_raw_input
# init depending on the log mode requested
isfile = os.path.isfile
logmode = self.logmode
if logmode == 'append':
self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
elif logmode == 'backup':
if isfile(self.logfname):
backup_logname = self.logfname+'~'
# Manually remove any old backup, since os.rename may fail
# under Windows.
if isfile(backup_logname):
os.remove(backup_logname)
os.rename(self.logfname,backup_logname)
self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
elif logmode == 'global':
self.logfname = os.path.join(self.home_dir,self.logfname)
self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
elif logmode == 'over':
if isfile(self.logfname):
os.remove(self.logfname)
self.logfile = io.open(self.logfname,'w', encoding='utf-8')
elif logmode == 'rotate':
if isfile(self.logfname):
if isfile(self.logfname+'.001~'):
old = glob.glob(self.logfname+'.*~')
old.sort()
old.reverse()
for f in old:
root, ext = os.path.splitext(f)
num = int(ext[1:-1])+1
os.rename(f, root+'.'+repr(num).zfill(3)+'~')
os.rename(self.logfname, self.logfname+'.001~')
self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
if logmode != 'append':
self.logfile.write(self.loghead)
self.logfile.flush()
self.log_active = True
def switch_log(self,val):
"""Switch logging on/off. val should be ONLY a boolean."""
if val not in [False,True,0,1]:
raise ValueError('Call switch_log ONLY with a boolean argument, '
'not with: %s' % val)
label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
if self.logfile is None:
print("""
Logging hasn't been started yet (use logstart for that).
%logon/%logoff are for temporarily starting and stopping logging for a logfile
which already exists. But you must first start the logging process with
%logstart (optionally giving a logfile name).""")
else:
if self.log_active == val:
print('Logging is already',label[val])
else:
print('Switching logging',label[val])
self.log_active = not self.log_active
self.log_active_out = self.log_active
def logstate(self):
"""Print a status message about the logger."""
if self.logfile is None:
print('Logging has not been activated.')
else:
state = self.log_active and 'active' or 'temporarily suspended'
print('Filename :', self.logfname)
print('Mode :', self.logmode)
print('Output logging :', self.log_output)
print('Raw input log :', self.log_raw_input)
print('Timestamping :', self.timestamp)
print('State :', state)
def log(self, line_mod, line_ori):
"""Write the sources to a log.
Inputs:
- line_mod: possibly modified input, such as the transformations made
by input prefilters or input handlers of various kinds. This should
always be valid Python.
- line_ori: unmodified input line from the user. This is not
necessarily valid Python.
"""
# Write the log line, but decide which one according to the
# log_raw_input flag, set when the log is started.
if self.log_raw_input:
self.log_write(line_ori)
else:
self.log_write(line_mod)
def log_write(self, data, kind='input'):
"""Write data to the log file, if active"""
# print('data: %r' % data) # dbg
if self.log_active and data:
write = self.logfile.write
if kind=='input':
if self.timestamp:
write(time.strftime('# %a, %d %b %Y %H:%M:%S\n', time.localtime()))
write(data)
elif kind=='output' and self.log_output:
odata = u'\n'.join([u'#[Out]# %s' % s
for s in data.splitlines()])
write(u'%s\n' % odata)
try:
self.logfile.flush()
except OSError:
print("Failed to flush the log file.")
print(
f"Please check that {self.logfname} exists and have the right permissions."
)
print(
"Also consider turning off the log with `%logstop` to avoid this warning."
)
def logstop(self):
"""Fully stop logging and close log file.
In order to start logging again, a new logstart() call needs to be
made, possibly (though not necessarily) with a new filename, mode and
other options."""
if self.logfile is not None:
self.logfile.close()
self.logfile = None
else:
print("Logging hadn't been started.")
self.log_active = False
# For backwards compatibility, in case anyone was using this.
close_log = logstop
| Logger |
python | django__django | tests/model_forms/models.py | {
"start": 15654,
"end": 16282
} | class ____(models.Model):
left = models.ForeignKey(
"self", related_name="+", null=True, on_delete=models.SET_NULL
)
right = models.ForeignKey(
"self", related_name="+", null=True, on_delete=models.SET_NULL
)
class Meta:
required_db_features = {"supports_table_check_constraints"}
constraints = [
models.CheckConstraint(
name="%(app_label)s_%(class)s_left_not_right",
# right_id here is the ForeignKey's attname, not name.
condition=~models.Q(left=models.F("right_id")),
),
]
| AttnameConstraintsModel |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_tests.py | {
"start": 58323,
"end": 66783
} | class ____(TestCase):
@mock.patch('markdown.Markdown', mock.Mock())
def test_simple_list(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': ['foo', 'bar'],
}
conf = self.get_config(Schema, config)
assert_type(conf.markdown_extensions, List[str])
assert_type(conf.mdx_configs, Dict[str, dict])
self.assertEqual(conf.markdown_extensions, ['foo', 'bar'])
self.assertEqual(conf.mdx_configs, {})
@mock.patch('markdown.Markdown', mock.Mock())
def test_list_dicts(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': [
{'foo': {'foo_option': 'foo value'}},
{'bar': {'bar_option': 'bar value'}},
{'baz': None},
]
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['foo', 'bar', 'baz'])
self.assertEqual(
conf.mdx_configs,
{
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'},
},
)
@mock.patch('markdown.Markdown', mock.Mock())
def test_mixed_list(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': [
'foo',
{'bar': {'bar_option': 'bar value'}},
]
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['foo', 'bar'])
self.assertEqual(
conf.mdx_configs,
{
'bar': {'bar_option': 'bar value'},
},
)
@mock.patch('markdown.Markdown', mock.Mock())
def test_dict_of_dicts(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'},
'baz': {},
}
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['foo', 'bar', 'baz'])
self.assertEqual(
conf.mdx_configs,
{
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'},
},
)
@mock.patch('markdown.Markdown', mock.Mock())
def test_builtins(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions(builtins=['meta', 'toc'])
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': ['foo', 'bar'],
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['meta', 'toc', 'foo', 'bar'])
self.assertEqual(conf.mdx_configs, {})
def test_duplicates(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions(builtins=['meta', 'toc'])
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': ['meta', 'toc'],
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['meta', 'toc'])
self.assertEqual(conf.mdx_configs, {})
def test_builtins_config(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions(builtins=['meta', 'toc'])
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': [
{'toc': {'permalink': True}},
],
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['meta', 'toc'])
self.assertEqual(conf.mdx_configs, {'toc': {'permalink': True}})
@mock.patch('markdown.Markdown', mock.Mock())
def test_configkey(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions(configkey='bar')
bar = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': [
{'foo': {'foo_option': 'foo value'}},
]
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, ['foo'])
self.assertEqual(
conf.bar,
{
'foo': {'foo_option': 'foo value'},
},
)
def test_missing_default(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
mdx_configs = c.Private[Dict[str, dict]]()
conf = self.get_config(Schema, {})
self.assertEqual(conf.markdown_extensions, [])
self.assertEqual(conf.mdx_configs, {})
def test_none(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions(default=[])
mdx_configs = c.Private[Dict[str, dict]]()
config = {
'markdown_extensions': None,
}
conf = self.get_config(Schema, config)
self.assertEqual(conf.markdown_extensions, [])
self.assertEqual(conf.mdx_configs, {})
@mock.patch('markdown.Markdown', mock.Mock())
def test_not_list(self) -> None:
class Schema(Config):
option = c.MarkdownExtensions()
with self.expect_error(option="Invalid Markdown Extensions configuration"):
self.get_config(Schema, {'option': 'not a list'})
@mock.patch('markdown.Markdown', mock.Mock())
def test_invalid_config_option(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
config = {
'markdown_extensions': [
{'foo': 'not a dict'},
],
}
with self.expect_error(
markdown_extensions="Invalid config options for Markdown Extension 'foo'."
):
self.get_config(Schema, config)
@mock.patch('markdown.Markdown', mock.Mock())
def test_invalid_config_item(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
config = {
'markdown_extensions': [
['not a dict'],
],
}
with self.expect_error(markdown_extensions="Invalid Markdown Extensions configuration"):
self.get_config(Schema, config)
@mock.patch('markdown.Markdown', mock.Mock())
def test_invalid_dict_item(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
config = {
'markdown_extensions': [
{'key1': 'value', 'key2': 'too many keys'},
],
}
with self.expect_error(markdown_extensions="Invalid Markdown Extensions configuration"):
self.get_config(Schema, config)
def test_unknown_extension(self) -> None:
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
config = {
'markdown_extensions': ['unknown'],
}
with self.expect_error(
markdown_extensions=re.compile(r"Failed to load extension 'unknown'.\n.+")
):
self.get_config(Schema, config)
def test_multiple_markdown_config_instances(self) -> None:
# This had a bug where an extension config would persist to separate
# config instances that didn't specify extensions.
class Schema(Config):
markdown_extensions = c.MarkdownExtensions()
mdx_configs = c.Private[Dict[str, dict]]()
conf = self.get_config(
Schema,
{
'markdown_extensions': [{'toc': {'permalink': '##'}}],
},
)
self.assertEqual(conf.mdx_configs['toc'], {'permalink': '##'})
conf = self.get_config(
Schema,
{},
)
self.assertIsNone(conf.mdx_configs.get('toc'))
| MarkdownExtensionsTest |
python | ansible__ansible | lib/ansible/plugins/callback/junit.py | {
"start": 3664,
"end": 13332
} | class ____(CallbackBase):
"""
This callback writes playbook output to a JUnit formatted XML file.
Tasks show up in the report as follows:
'ok': pass
'failed' with 'EXPECTED FAILURE' in the task name: pass
'failed' with 'TOGGLE RESULT' in the task name: pass
'ok' with 'TOGGLE RESULT' in the task name: failure
'failed' due to an exception: error
'failed' for other reasons: failure
'skipped': skipped
This plugin makes use of the following environment variables:
JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
Default: ~/.ansible.log
JUNIT_TASK_CLASS (optional): Configure the output to be one class per yaml file
Default: False
JUNIT_TASK_RELATIVE_PATH (optional): Configure the output to use relative paths to given directory
Default: none
JUNIT_FAIL_ON_CHANGE (optional): Consider any tasks reporting "changed" as a junit test failure
Default: False
JUNIT_FAIL_ON_IGNORE (optional): Consider failed tasks as a junit test failure even if ignore_on_error is set
Default: False
JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT (optional): Should the setup tasks be included in the final report
Default: True
JUNIT_HIDE_TASK_ARGUMENTS (optional): Hide the arguments for a task
Default: False
JUNIT_TEST_CASE_PREFIX (optional): Consider a task only as test case if it has this value as prefix. Additionally, failing tasks are recorded as failed
test cases.
Default: <empty>
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'junit'
CALLBACK_NEEDS_ENABLED = True
def __init__(self) -> None:
super(CallbackModule, self).__init__()
self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
self._task_class = os.getenv('JUNIT_TASK_CLASS', 'False').lower()
self._task_relative_path = os.getenv('JUNIT_TASK_RELATIVE_PATH', '')
self._fail_on_change = os.getenv('JUNIT_FAIL_ON_CHANGE', 'False').lower()
self._fail_on_ignore = os.getenv('JUNIT_FAIL_ON_IGNORE', 'False').lower()
self._include_setup_tasks_in_report = os.getenv('JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT', 'True').lower()
self._hide_task_arguments = os.getenv('JUNIT_HIDE_TASK_ARGUMENTS', 'False').lower()
self._test_case_prefix = os.getenv('JUNIT_TEST_CASE_PREFIX', '')
self._replace_out_of_tree_path = os.getenv('JUNIT_REPLACE_OUT_OF_TREE_PATH', None)
self._playbook_path = None
self._playbook_name = None
self._play_name: str | None = None
self._task_data: dict[str, TaskData] = {}
self.disabled = False
if self._replace_out_of_tree_path is not None:
self._replace_out_of_tree_path = to_text(self._replace_out_of_tree_path)
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
def _start_task(self, task: Task) -> None:
""" record the start of a task for one or more hosts """
uuid = task._uuid
if uuid in self._task_data:
return
play = self._play_name
name = task.get_name().strip()
path = task.get_path()
action = task.action
if not task.no_log and self._hide_task_arguments == 'false':
args = ', '.join(('%s=%s' % a for a in task.args.items()))
if args:
name += ' ' + args
self._task_data[uuid] = TaskData(uuid, name, path, play, action)
def _finish_task(self, status: str, result: IncludedFile | CallbackTaskResult) -> None:
""" record the results of a task for a single host """
if isinstance(result, CallbackTaskResult):
task_uuid = result.task._uuid
host_uuid = result.host._uuid
host_name = result.host.name
if self._fail_on_change == 'true' and status == 'ok' and result.result.get('changed', False):
status = 'failed'
else:
task_uuid = result._task._uuid
host_uuid = 'include'
host_name = 'include'
task_data = self._task_data[task_uuid]
# ignore failure if expected and toggle result if asked for
if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
status = 'ok'
elif 'TOGGLE RESULT' in task_data.name:
if status == 'failed':
status = 'ok'
elif status == 'ok':
status = 'failed'
if task_data.name.startswith(self._test_case_prefix) or status == 'failed':
task_data.add_host(HostData(host_uuid, host_name, status, result))
def _build_test_case(self, task_data: TaskData, host_data: HostData) -> TestCase:
""" build a TestCase from the given TaskData and HostData """
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
duration = decimal.Decimal(host_data.finish - task_data.start)
if self._task_relative_path and task_data.path:
junit_classname = to_text(os.path.relpath(to_bytes(task_data.path), to_bytes(self._task_relative_path)))
else:
junit_classname = task_data.path
if self._replace_out_of_tree_path is not None and junit_classname.startswith('../'):
junit_classname = self._replace_out_of_tree_path + to_text(os.path.basename(to_bytes(junit_classname)))
if self._task_class == 'true':
junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)
if host_data.status == 'included':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=str(host_data.result))
task_result = t.cast(CallbackTaskResult, host_data.result)
res = task_result.result
rc = res.get('rc', 0)
dump = self._dump_results(res, indent=0)
dump = self._cleanse_string(dump)
if host_data.status == 'ok':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=dump)
test_case = TestCase(name=name, classname=junit_classname, time=duration)
if host_data.status == 'failed':
if error_summary := task_result.exception:
message = _event_utils.format_event_brief_message(error_summary.event)
output = _event_formatting.format_event_traceback(error_summary.event)
test_case.errors.append(TestError(message=message, output=output))
elif 'msg' in res:
message = res['msg']
test_case.failures.append(TestFailure(message=message, output=dump))
else:
test_case.failures.append(TestFailure(message='rc=%s' % rc, output=dump))
elif host_data.status == 'skipped':
if 'skip_reason' in res:
message = res['skip_reason']
else:
message = 'skipped'
test_case.skipped = message
return test_case
@staticmethod
def _cleanse_string(value):
""" convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
def _generate_report(self):
""" generate a TestSuite report from the collected TaskData and HostData """
test_cases = []
for task_uuid, task_data in self._task_data.items():
if task_data.action in constants._ACTION_SETUP and self._include_setup_tasks_in_report == 'false':
continue
for host_uuid, host_data in task_data.host_data.items():
test_cases.append(self._build_test_case(task_data, host_data))
test_suite = TestSuite(name=self._playbook_name, cases=test_cases)
test_suites = TestSuites(suites=[test_suite])
report = test_suites.to_pretty_xml()
output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
with open(output_file, 'wb') as xml:
xml.write(to_bytes(report, errors='surrogate_or_strict'))
def v2_playbook_on_start(self, playbook):
self._playbook_path = playbook._file_name
self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
def v2_playbook_on_play_start(self, play):
self._play_name = play.get_name()
def v2_playbook_on_task_start(self, task: Task, is_conditional: bool) -> None:
self._start_task(task)
def v2_playbook_on_handler_task_start(self, task: Task) -> None:
self._start_task(task)
def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors=False) -> None:
if ignore_errors and self._fail_on_ignore != 'true':
self._finish_task('ok', result)
else:
self._finish_task('failed', result)
def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
self._finish_task('ok', result)
def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
self._finish_task('skipped', result)
def v2_playbook_on_include(self, included_file: IncludedFile) -> None:
self._finish_task('included', included_file)
def v2_playbook_on_stats(self, stats):
self._generate_report()
| CallbackModule |
python | sympy__sympy | sympy/physics/quantum/gate.py | {
"start": 16140,
"end": 16310
} | class ____(CGate):
"""Version of CGate that allows gate simplifications.
I.e. cnot looks like an oplus, cphase has dots, etc.
"""
simplify_cgate=True
| CGateS |
python | scrapy__scrapy | scrapy/spidermiddlewares/referer.py | {
"start": 9890,
"end": 11612
} | class ____(NoReferrerWhenDowngradePolicy):
"""
A variant of "no-referrer-when-downgrade",
with the addition that "Referer" is not sent if the parent request was
using ``file://`` or ``s3://`` scheme.
"""
NOREFERRER_SCHEMES: tuple[str, ...] = (*LOCAL_SCHEMES, "file", "s3")
name: str = POLICY_SCRAPY_DEFAULT
_policy_classes: dict[str, type[ReferrerPolicy]] = {
p.name: p
for p in (
NoReferrerPolicy,
NoReferrerWhenDowngradePolicy,
SameOriginPolicy,
OriginPolicy,
StrictOriginPolicy,
OriginWhenCrossOriginPolicy,
StrictOriginWhenCrossOriginPolicy,
UnsafeUrlPolicy,
DefaultReferrerPolicy,
)
}
# Reference: https://www.w3.org/TR/referrer-policy/#referrer-policy-empty-string
_policy_classes[""] = NoReferrerWhenDowngradePolicy
def _load_policy_class(
policy: str, warning_only: bool = False
) -> type[ReferrerPolicy] | None:
"""
Expect a string for the path to the policy class,
otherwise try to interpret the string as a standard value
from https://www.w3.org/TR/referrer-policy/#referrer-policies
"""
try:
return cast("type[ReferrerPolicy]", load_object(policy))
except ValueError:
tokens = [token.strip() for token in policy.lower().split(",")]
# https://www.w3.org/TR/referrer-policy/#parse-referrer-policy-from-header
for token in tokens[::-1]:
if token in _policy_classes:
return _policy_classes[token]
msg = f"Could not load referrer policy {policy!r}"
if not warning_only:
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
return None
| DefaultReferrerPolicy |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.