language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
django__django
|
tests/check_framework/test_security.py
|
{
"start": 2547,
"end": 4779
}
|
class ____(SimpleTestCase):
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[],
)
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY="1",
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[],
)
def test_session_cookie_httponly_with_installed_app_truthy(self):
"""SESSION_COOKIE_HTTPONLY must be boolean."""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"],
)
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"],
)
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"],
)
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(sessions.check_session_cookie_httponly(None), [])
|
CheckSessionCookieHttpOnlyTest
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/backpressure_policy/backpressure_policy.py
|
{
"start": 391,
"end": 2190
}
|
class ____(ABC):
"""Interface for back pressure policies."""
def __init__(
self,
data_context: DataContext,
topology: "Topology",
resource_manager: "ResourceManager",
):
"""Initialize the backpressure policy.
Args:
data_context: The data context.
topology: The execution topology.
resource_manager: The resource manager.
"""
self._data_context = data_context
self._topology = topology
self._resource_manager = resource_manager
def can_add_input(self, op: "PhysicalOperator") -> bool:
"""Determine if we can add a new input to the operator. If returns False, the
operator will be backpressured and will not be able to run new tasks.
Used in `streaming_executor_state.py::select_operator_to_run()`.
Returns: True if we can add a new input to the operator, False otherwise.
Note, if multiple backpressure policies are enabled, the operator will be
backpressured if any of the policies returns False.
"""
return True
def max_task_output_bytes_to_read(self, op: "PhysicalOperator") -> Optional[int]:
"""Return the maximum bytes of pending task outputs can be read for
the given operator. None means no limit.
This is used for output backpressure to limit how much data an operator
can read from its running tasks.
Note, if multiple backpressure policies return non-None values for an operator,
the minimum of those values will be used as the limit.
Args:
op: The operator to get the limit for.
Returns:
The maximum bytes that can be read, or None if no limit.
"""
return None
|
BackpressurePolicy
|
python
|
pydantic__pydantic
|
pydantic/types.py
|
{
"start": 52004,
"end": 54105
}
|
class ____(_SecretBase[SecretType]):
_inner_schema: ClassVar[CoreSchema]
_error_kind: ClassVar[str]
@classmethod
def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
def get_json_schema(_core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
json_schema = handler(cls._inner_schema)
_utils.update_not_none(
json_schema,
type='string',
writeOnly=True,
format='password',
)
return json_schema
def get_secret_schema(strict: bool) -> CoreSchema:
inner_schema = {**cls._inner_schema, 'strict': strict}
json_schema = core_schema.no_info_after_validator_function(
source, # construct the type
inner_schema, # pyright: ignore[reportArgumentType]
)
return core_schema.json_or_python_schema(
python_schema=core_schema.union_schema(
[
core_schema.is_instance_schema(source),
json_schema,
],
custom_error_type=cls._error_kind,
),
json_schema=json_schema,
serialization=core_schema.plain_serializer_function_ser_schema(
_serialize_secret_field,
info_arg=True,
when_used='always',
),
)
return core_schema.lax_or_strict_schema(
lax_schema=get_secret_schema(strict=False),
strict_schema=get_secret_schema(strict=True),
metadata={'pydantic_js_functions': [get_json_schema]},
)
__pydantic_serializer__ = SchemaSerializer(
core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
_serialize_secret_field,
info_arg=True,
when_used='always',
)
)
)
|
_SecretField
|
python
|
pypa__pip
|
src/pip/_vendor/distlib/util.py
|
{
"start": 55297,
"end": 55800
}
|
class ____(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
|
CSVWriter
|
python
|
pandas-dev__pandas
|
pandas/tests/test_errors.py
|
{
"start": 2090,
"end": 4016
}
|
class ____:
@classmethod
def classmethod(cls):
raise AbstractMethodError(cls, methodtype="classmethod")
@property
def property(self):
raise AbstractMethodError(self, methodtype="property")
def method(self):
raise AbstractMethodError(self)
def test_AbstractMethodError_classmethod():
xpr = "This classmethod must be defined in the concrete class Foo"
with pytest.raises(AbstractMethodError, match=xpr):
Foo.classmethod()
xpr = "This property must be defined in the concrete class Foo"
with pytest.raises(AbstractMethodError, match=xpr):
Foo().property
xpr = "This method must be defined in the concrete class Foo"
with pytest.raises(AbstractMethodError, match=xpr):
Foo().method()
@pytest.mark.parametrize(
"warn_category, catch_category",
[
(Pandas4Warning, PandasChangeWarning),
(Pandas4Warning, PandasDeprecationWarning),
(Pandas5Warning, PandasChangeWarning),
(Pandas5Warning, PandasPendingDeprecationWarning),
],
)
def test_pandas_warnings(warn_category, catch_category):
# https://github.com/pandas-dev/pandas/pull/61468
with tm.assert_produces_warning(catch_category):
warnings.warn("test", category=warn_category)
@pytest.mark.parametrize(
"warn_category, filter_category",
[
(Pandas4Warning, PandasChangeWarning),
(Pandas4Warning, PandasDeprecationWarning),
(Pandas5Warning, PandasChangeWarning),
(Pandas5Warning, PandasPendingDeprecationWarning),
],
)
def test_pandas_warnings_filter(warn_category, filter_category):
# https://github.com/pandas-dev/pandas/pull/61468
# Ensure users can suppress warnings.
with tm.assert_produces_warning(None), warnings.catch_warnings():
warnings.filterwarnings(category=filter_category, action="ignore")
warnings.warn("test", category=warn_category)
|
Foo
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/sentry_app_created.py
|
{
"start": 75,
"end": 283
}
|
class ____(analytics.Event):
user_id: int
organization_id: int
sentry_app: str
created_alert_rule_ui_component: bool | None = None
analytics.register(SentryAppCreatedEvent)
|
SentryAppCreatedEvent
|
python
|
pytorch__pytorch
|
torch/distributions/constraints.py
|
{
"start": 10599,
"end": 10892
}
|
class ____(Constraint):
"""
Constrain to one-hot vectors.
"""
is_discrete = True
event_dim = 1
def check(self, value):
is_boolean = (value == 0) | (value == 1)
is_normalized = value.sum(-1).eq(1)
return is_boolean.all(-1) & is_normalized
|
_OneHot
|
python
|
ansible__ansible
|
test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py
|
{
"start": 407,
"end": 1935
}
|
class ____:
"""Defines an unwanted import."""
def __init__(
self,
alternative, # type: str
modules_only=False, # type: bool
names=None, # type: t.Optional[t.Tuple[str, ...]]
ignore_paths=None, # type: t.Optional[t.Tuple[str, ...]]
ansible_test_only=False, # type: bool
): # type: (...) -> None
self.alternative = alternative
self.modules_only = modules_only
self.names = set(names) if names else set()
self.ignore_paths = ignore_paths
self.ansible_test_only = ansible_test_only
def applies_to(self, path, name=None): # type: (str, t.Optional[str]) -> bool
"""Return True if this entry applies to the given path, otherwise return False."""
if self.names:
if not name:
return False
if name not in self.names:
return False
if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths):
return False
if self.ansible_test_only and '/test/lib/ansible_test/_internal/' not in path:
return False
if self.modules_only:
return is_module_path(path)
return True
def is_module_path(path): # type: (str) -> bool
"""Return True if the given path is a module or module_utils path, otherwise return False."""
return path.startswith(ANSIBLE_TEST_MODULES_PATH) or path.startswith(ANSIBLE_TEST_MODULE_UTILS_PATH)
|
UnwantedEntry
|
python
|
facebook__pyre-check
|
client/commands/server_state.py
|
{
"start": 1467,
"end": 1596
}
|
class ____:
code: str
is_dirty: bool = False
pyre_code_updated: bool = False
@dataclasses.dataclass
|
OpenedDocumentState
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_oauth2_validators.py
|
{
"start": 17433,
"end": 22298
}
|
class ____(TransactionTestCase):
"""These test cases check that the recommended error codes are returned
when token authentication fails.
RFC-6750: https://rfc-editor.org/rfc/rfc6750.html
> If the protected resource request does not include authentication
> credentials or does not contain an access token that enables access
> to the protected resource, the resource server MUST include the HTTP
> "WWW-Authenticate" response header field[.]
>
> ...
>
> If the request lacks any authentication information..., the
> resource server SHOULD NOT include an error code or other error
> information.
>
> ...
>
> If the protected resource request included an access token and failed
> authentication, the resource server SHOULD include the "error"
> attribute to provide the client with the reason why the access
> request was declined.
See https://rfc-editor.org/rfc/rfc6750.html#section-3.1 for the allowed error
codes.
"""
def setUp(self):
self.user = UserModel.objects.create_user(
"user",
"test@example.com",
"123456",
)
self.request = mock.MagicMock(wraps=Request)
self.request.user = self.user
self.request.grant_type = "not client"
self.validator = OAuth2Validator()
self.application = Application.objects.create(
client_id="client_id",
client_secret=CLEARTEXT_SECRET,
user=self.user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_PASSWORD,
)
self.request.client = self.application
def test_validate_bearer_token_does_not_add_error_when_no_token_is_provided(self):
self.assertFalse(self.validator.validate_bearer_token(None, ["dolphin"], self.request))
with self.assertRaises(AttributeError):
self.request.oauth2_error
def test_validate_bearer_token_adds_error_to_the_request_when_an_invalid_token_is_provided(self):
access_token = mock.MagicMock(token="some_invalid_token")
self.assertFalse(
self.validator.validate_bearer_token(
access_token.token,
[],
self.request,
)
)
self.assertDictEqual(
self.request.oauth2_error,
{
"error": "invalid_token",
"error_description": "The access token is invalid.",
},
)
def test_validate_bearer_token_adds_error_to_the_request_when_an_expired_token_is_provided(self):
access_token = AccessToken.objects.create(
token="some_valid_token",
user=self.user,
expires=timezone.now() - datetime.timedelta(seconds=1),
application=self.application,
)
self.assertFalse(
self.validator.validate_bearer_token(
access_token.token,
[],
self.request,
)
)
self.assertDictEqual(
self.request.oauth2_error,
{
"error": "invalid_token",
"error_description": "The access token has expired.",
},
)
def test_validate_bearer_token_adds_error_to_the_request_when_a_valid_token_has_insufficient_scope(self):
access_token = AccessToken.objects.create(
token="some_valid_token",
user=self.user,
expires=timezone.now() + datetime.timedelta(seconds=1),
application=self.application,
)
self.assertFalse(
self.validator.validate_bearer_token(
access_token.token,
["some_extra_scope"],
self.request,
)
)
self.assertDictEqual(
self.request.oauth2_error,
{
"error": "insufficient_scope",
"error_description": "The access token is valid but does not have enough scope.",
},
)
def test_validate_bearer_token_adds_error_to_the_request_when_a_invalid_custom_token_is_provided(self):
access_token = AccessToken.objects.create(
token="some_valid_token",
user=self.user,
expires=timezone.now() + datetime.timedelta(seconds=1),
application=self.application,
)
with always_invalid_token():
self.assertFalse(
self.validator.validate_bearer_token(
access_token.token,
[],
self.request,
)
)
self.assertDictEqual(
self.request.oauth2_error,
{
"error": "invalid_token",
},
)
|
TestOAuth2ValidatorProvidesErrorData
|
python
|
mlflow__mlflow
|
mlflow/store/tracking/dbmodels/initial_models.py
|
{
"start": 1202,
"end": 2529
}
|
class ____(Base):
"""
DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in ``experiment`` table.
"""
__tablename__ = "experiments"
experiment_id = Column(Integer, autoincrement=True)
"""
Experiment ID: `Integer`. *Primary Key* for ``experiment`` table.
"""
name = Column(String(256), unique=True, nullable=False)
"""
Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in
table schema.
"""
artifact_location = Column(String(256), nullable=True)
"""
Default artifact location for this experiment: `String` (limit 256 characters). Defined as
*Non null* in table schema.
"""
lifecycle_stage = Column(String(32), default="active")
"""
Lifecycle Stage of experiment: `String` (limit 32 characters).
Can be either ``active`` (default) or ``deleted``.
"""
__table_args__ = (
CheckConstraint(
lifecycle_stage.in_(["active", "deleted"]), name="experiments_lifecycle_stage"
),
PrimaryKeyConstraint("experiment_id", name="experiment_pk"),
)
def __repr__(self):
return f"<SqlExperiment ({self.experiment_id}, {self.name})>"
|
SqlExperiment
|
python
|
ray-project__ray
|
release/llm_tests/benchmark/configs.py
|
{
"start": 378,
"end": 4700
}
|
class ____(BaseModel):
provider: Optional[str] = Field(
None,
description="Which flavor of API to use. If not specified, we'll try to guess based on the URL and /v1/models output",
)
model: Optional[str] = Field(
None,
description="The model to use for generating text. If not specified we will pick the first model from the service as returned by /v1/models",
)
chat: bool = Field(True, description="Use /v1/chat/completions API")
prompt_tokens: int = Field(
512,
description="Length of the prompt in tokens",
)
prompt_chars: Optional[int] = Field(
None,
description="Length of the prompt in characters",
)
prompt_text: Optional[str] = Field(
None,
description="Prompt text to use instead of generating one. It can be a file reference starting with an ampersand, e.g. `@prompt.txt`",
)
prompt_randomize: bool = Field(
False,
description="Include a few random numbers in the generated prompt to avoid caching",
)
max_tokens: int = Field(
64,
description="Max number of tokens to generate. If max_tokens_distribution is non-constant this is going to be the mean",
)
max_tokens_cap: Optional[int] = Field(
None,
description="If max_tokens_distribution is non-constant, this truncates the distribition at the specified limit",
)
max_tokens_distribution: TokensDistributionType = Field(
TokensDistributionType.CONSTANT,
description="How to sample max_tokens on each request",
)
max_tokens_range: float = Field(
0.3,
description="Specifies the width of the distribution. Specified value `alpha` is relative to `max_tokens`",
)
stream: bool = Field(True, description="Use the streaming API")
api_key: Optional[str] = Field(
None,
description="Auth for the API",
)
temperature: float = Field(0.1, description="Temperature parameter for the API")
logprobs: Optional[int] = Field(
None,
description="Whether to ask for logprobs, it makes things slower for some providers but is necessary for token count in streaming",
)
summary_file: Optional[str] = Field(
None,
description="Append the line with the summary to the specified CSV file",
)
qps: Optional[float] = Field(
None,
description="Enabled 'fixed QPS' mode where requests are issues at the specified rate regardless of how long the processing takes",
)
qps_distribution: DistributionType = Field(
DistributionType.CONSTANT,
description="Must be used with qps. Specifies how to space out requests",
)
burst: Optional[float] = Field(
None,
description="Makes requests to arrive in bursts every specified number of seconds",
)
tokenizer: Optional[str] = Field(
None,
description="Specify HF tokenizer to use for validating the output of the model",
)
show_response: bool = Field(
False,
description="Print the result of each generation",
)
prompt_cache_max_len: int = Field(
0,
description="Maximum length of the prompt cache to use",
)
header: List[str] = Field(
default_factory=list,
description="Arbitrary headers to add to the inference request",
)
n: int = Field(
1,
description="How many sequences to generate (makes sense to use with non-zero temperature)",
)
host: Optional[str] = Field(
default=None,
description="Host to load test in the following format: http://10.21.32.33",
)
reset_stats: bool = Field(
default=True,
description="Determines if stats should be reset once hatching is complete",
)
users: int = Field(
default=None,
description="Number of concurrent users to spawn for benchmarking.",
)
run_time: str = Field(
default="30s",
description="The runtime it is in form of Ns, Nm, or Nh, for seconds, minutes, and hours.",
)
def to_namespace(self) -> argparse.Namespace:
"""
Convert the model to an argparse.Namespace object
"""
return argparse.Namespace(**self.dict())
|
LoadTestConfig
|
python
|
openai__openai-python
|
src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
|
{
"start": 218,
"end": 840
}
|
class ____(BaseModel):
delta: str
"""The partial code snippet being streamed by the code interpreter."""
item_id: str
"""The unique identifier of the code interpreter tool call item."""
output_index: int
"""
The index of the output item in the response for which the code is being
streamed.
"""
sequence_number: int
"""The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call_code.delta"]
"""The type of the event. Always `response.code_interpreter_call_code.delta`."""
|
ResponseCodeInterpreterCallCodeDeltaEvent
|
python
|
PyCQA__pylint
|
doc/data/messages/i/invalid-hash-returned/bad.py
|
{
"start": 0,
"end": 120
}
|
class ____:
"""__hash__ returns dict"""
def __hash__(self): # [invalid-hash-returned]
return {}
|
CustomHash
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/solverHigherOrder13.py
|
{
"start": 167,
"end": 482
}
|
class ____(Generic[*S, D]): ...
def func1[*S1, D1, *S2, D2, Dim1](
c: Callable[[N[*S1, D1], N[*S2, D2]], Any],
) -> Callable[[N[Dim1, *S1, D1], N[Dim1, *S2, D2]], Any]: ...
def func2[X, Y, Z](x: N[X, Y, Z], y: N[X, Y, Z]):
func1(func3)(x, y)
def func3[Dim1, T](x: N[Dim1, T], y: N[Dim1, T]) -> N[T]: ...
|
N
|
python
|
encode__starlette
|
starlette/routing.py
|
{
"start": 13792,
"end": 18650
}
|
class ____(BaseRoute):
def __init__(
self,
path: str,
app: ASGIApp | None = None,
routes: Sequence[BaseRoute] | None = None,
name: str | None = None,
*,
middleware: Sequence[Middleware] | None = None,
) -> None:
assert path == "" or path.startswith("/"), "Routed paths must start with '/'"
assert app is not None or routes is not None, "Either 'app=...', or 'routes=' must be specified"
self.path = path.rstrip("/")
if app is not None:
self._base_app: ASGIApp = app
else:
self._base_app = Router(routes=routes)
self.app = self._base_app
if middleware is not None:
for cls, args, kwargs in reversed(middleware):
self.app = cls(self.app, *args, **kwargs)
self.name = name
self.path_regex, self.path_format, self.param_convertors = compile_path(self.path + "/{path:path}")
@property
def routes(self) -> list[BaseRoute]:
return getattr(self._base_app, "routes", [])
def matches(self, scope: Scope) -> tuple[Match, Scope]:
path_params: dict[str, Any]
if scope["type"] in ("http", "websocket"): # pragma: no branch
root_path = scope.get("root_path", "")
route_path = get_route_path(scope)
match = self.path_regex.match(route_path)
if match:
matched_params = match.groupdict()
for key, value in matched_params.items():
matched_params[key] = self.param_convertors[key].convert(value)
remaining_path = "/" + matched_params.pop("path")
matched_path = route_path[: -len(remaining_path)]
path_params = dict(scope.get("path_params", {}))
path_params.update(matched_params)
child_scope = {
"path_params": path_params,
# app_root_path will only be set at the top level scope,
# initialized with the (optional) value of a root_path
# set above/before Starlette. And even though any
# mount will have its own child scope with its own respective
# root_path, the app_root_path will always be available in all
# the child scopes with the same top level value because it's
# set only once here with a default, any other child scope will
# just inherit that app_root_path default value stored in the
# scope. All this is needed to support Request.url_for(), as it
# uses the app_root_path to build the URL path.
"app_root_path": scope.get("app_root_path", root_path),
"root_path": root_path + matched_path,
"endpoint": self.app,
}
return Match.FULL, child_scope
return Match.NONE, {}
def url_path_for(self, name: str, /, **path_params: Any) -> URLPath:
if self.name is not None and name == self.name and "path" in path_params:
# 'name' matches "<mount_name>".
path_params["path"] = path_params["path"].lstrip("/")
path, remaining_params = replace_params(self.path_format, self.param_convertors, path_params)
if not remaining_params:
return URLPath(path=path)
elif self.name is None or name.startswith(self.name + ":"):
if self.name is None:
# No mount name.
remaining_name = name
else:
# 'name' matches "<mount_name>:<child_name>".
remaining_name = name[len(self.name) + 1 :]
path_kwarg = path_params.get("path")
path_params["path"] = ""
path_prefix, remaining_params = replace_params(self.path_format, self.param_convertors, path_params)
if path_kwarg is not None:
remaining_params["path"] = path_kwarg
for route in self.routes or []:
try:
url = route.url_path_for(remaining_name, **remaining_params)
return URLPath(path=path_prefix.rstrip("/") + str(url), protocol=url.protocol)
except NoMatchFound:
pass
raise NoMatchFound(name, path_params)
async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:
await self.app(scope, receive, send)
def __eq__(self, other: Any) -> bool:
return isinstance(other, Mount) and self.path == other.path and self.app == other.app
def __repr__(self) -> str:
class_name = self.__class__.__name__
name = self.name or ""
return f"{class_name}(path={self.path!r}, name={name!r}, app={self.app!r})"
|
Mount
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_pool.py
|
{
"start": 3034,
"end": 10310
}
|
class ____(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select(1).compile(testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.dbapi_connection, c2.dbapi_connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.dbapi_connection
c1 = r1.get_connection()
is_(c1, r1.dbapi_connection)
is_(c1, r1.driver_connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.dbapi_connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.dbapi_connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.dbapi_connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
@testing.combinations(
(pool.QueuePool, False),
(pool.AsyncAdaptedQueuePool, True),
(pool.NullPool, None),
(pool.SingletonThreadPool, False),
(pool.StaticPool, None),
(pool.AssertionPool, None),
)
def test_is_asyncio_from_dialect(self, pool_cls, is_async_kind):
p = pool_cls(creator=object())
for is_async in (True, False):
if is_async:
p._dialect = _AsyncConnDialect()
else:
p._dialect = _ConnDialect()
if is_async_kind is None:
eq_(p._is_asyncio, is_async)
else:
eq_(p._is_asyncio, is_async_kind)
@testing.combinations(
(pool.QueuePool, False),
(pool.AsyncAdaptedQueuePool, True),
(pool.NullPool, False),
(pool.SingletonThreadPool, False),
(pool.StaticPool, False),
(pool.AssertionPool, False),
)
def test_is_asyncio_from_dialect_cls(self, pool_cls, is_async):
eq_(pool_cls._is_asyncio, is_async)
def test_rec_fairy_default_dialect(self):
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.driver_connection, rec.dbapi_connection)
fairy = pool._ConnectionFairy(p1, rec.dbapi_connection, rec, False)
is_not_none(fairy.dbapi_connection)
is_(fairy.driver_connection, fairy.dbapi_connection)
is_(fairy.dbapi_connection, rec.dbapi_connection)
is_(fairy.driver_connection, rec.driver_connection)
def test_rec_fairy_adapted_dialect(self):
dbapi = MockDBAPI()
mock_dc = object()
class _AdaptedDialect(_ConnDialect):
def get_driver_connection(self, connection):
return mock_dc
p1 = pool.Pool(
creator=lambda: dbapi.connect("foo.db"), dialect=_AdaptedDialect()
)
rec = pool._ConnectionRecord(p1)
assert rec.dbapi_connection is not None
is_not_none(rec.dbapi_connection)
is_(rec.driver_connection, mock_dc)
fairy = pool._ConnectionFairy(p1, rec.dbapi_connection, rec, False)
is_not_none(fairy.dbapi_connection)
is_(fairy.driver_connection, mock_dc)
is_(fairy.dbapi_connection, rec.dbapi_connection)
is_(fairy.driver_connection, mock_dc)
|
PoolTest
|
python
|
doocs__leetcode
|
solution/2700-2799/2737.Find the Closest Marked Node/Solution.py
|
{
"start": 0,
"end": 676
}
|
class ____:
def minimumDistance(
self, n: int, edges: List[List[int]], s: int, marked: List[int]
) -> int:
g = [[inf] * n for _ in range(n)]
for u, v, w in edges:
g[u][v] = min(g[u][v], w)
dist = [inf] * n
vis = [False] * n
dist[s] = 0
for _ in range(n):
t = -1
for j in range(n):
if not vis[j] and (t == -1 or dist[t] > dist[j]):
t = j
vis[t] = True
for j in range(n):
dist[j] = min(dist[j], dist[t] + g[t][j])
ans = min(dist[i] for i in marked)
return -1 if ans >= inf else ans
|
Solution
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/Graphs/bellmanford-optimized/python/bellmanFordOptimized.py
|
{
"start": 13,
"end": 242
}
|
class ____: #Custom class to keep together information stored on edges (the neighbour and the cost to get there)
def __init__(self, neighbour = None, cost = None):
self.neighbour = neighbour
self.cost = cost
|
Node
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 19510,
"end": 19650
}
|
class ____(BaseModel):
consensus_thread_status: Literal[
"stopped",
] = Field(..., description="")
|
ConsensusThreadStatusOneOf1
|
python
|
langchain-ai__langchain
|
libs/standard-tests/tests/unit_tests/test_in_memory_vectorstore.py
|
{
"start": 450,
"end": 614
}
|
class ____(InMemoryVectorStore):
"""InMemoryVectorStore that does not implement get_by_ids."""
get_by_ids = VectorStore.get_by_ids
|
WithoutGetByIdsVectorStore
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_health_checks.py
|
{
"start": 4008,
"end": 4624
}
|
class ____:
@given(st.none())
def test(self, _):
pass
def test_differing_executors_fails_health_check():
sample_test_runner().test()
msg = re.escape(str(HealthCheck.differing_executors))
with pytest.raises(FailedHealthCheck, match=msg):
sample_test_runner().test()
def test_it_is_an_error_to_suppress_non_iterables():
with pytest.raises(InvalidArgument):
settings(suppress_health_check=1)
def test_it_is_an_error_to_suppress_non_healthchecks():
with pytest.raises(InvalidArgument):
settings(suppress_health_check=["notahealthcheck"])
|
sample_test_runner
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 68224,
"end": 68764
}
|
class ____(sgqlc.types.Enum):
"""The possible events to perform on a pull request review.
Enumeration Choices:
* `APPROVE`: Submit feedback and approve merging these changes.
* `COMMENT`: Submit general feedback without explicit approval.
* `DISMISS`: Dismiss review so it now longer effects merging.
* `REQUEST_CHANGES`: Submit feedback that must be addressed before
merging.
"""
__schema__ = github_schema
__choices__ = ("APPROVE", "COMMENT", "DISMISS", "REQUEST_CHANGES")
|
PullRequestReviewEvent
|
python
|
pypa__packaging
|
tests/test_tags.py
|
{
"start": 36345,
"end": 43920
}
|
class ____:
def test_iterator_returned(self) -> None:
result_iterator = tags.cpython_tags(
(3, 8), ["cp38d", "cp38"], ["plat1", "plat2"]
)
assert isinstance(result_iterator, collections.abc.Iterator)
def test_all_args(self) -> None:
result_iterator = tags.cpython_tags(
(3, 11), ["cp311d", "cp311"], ["plat1", "plat2"]
)
result = list(result_iterator)
assert result == [
tags.Tag("cp311", "cp311d", "plat1"),
tags.Tag("cp311", "cp311d", "plat2"),
tags.Tag("cp311", "cp311", "plat1"),
tags.Tag("cp311", "cp311", "plat2"),
tags.Tag("cp311", "abi3", "plat1"),
tags.Tag("cp311", "abi3", "plat2"),
tags.Tag("cp311", "none", "plat1"),
tags.Tag("cp311", "none", "plat2"),
tags.Tag("cp310", "abi3", "plat1"),
tags.Tag("cp310", "abi3", "plat2"),
tags.Tag("cp39", "abi3", "plat1"),
tags.Tag("cp39", "abi3", "plat2"),
tags.Tag("cp38", "abi3", "plat1"),
tags.Tag("cp38", "abi3", "plat2"),
tags.Tag("cp37", "abi3", "plat1"),
tags.Tag("cp37", "abi3", "plat2"),
tags.Tag("cp36", "abi3", "plat1"),
tags.Tag("cp36", "abi3", "plat2"),
tags.Tag("cp35", "abi3", "plat1"),
tags.Tag("cp35", "abi3", "plat2"),
tags.Tag("cp34", "abi3", "plat1"),
tags.Tag("cp34", "abi3", "plat2"),
tags.Tag("cp33", "abi3", "plat1"),
tags.Tag("cp33", "abi3", "plat2"),
tags.Tag("cp32", "abi3", "plat1"),
tags.Tag("cp32", "abi3", "plat2"),
]
result_iterator = tags.cpython_tags(
(3, 8), ["cp38d", "cp38"], ["plat1", "plat2"]
)
result = list(result_iterator)
assert result == [
tags.Tag("cp38", "cp38d", "plat1"),
tags.Tag("cp38", "cp38d", "plat2"),
tags.Tag("cp38", "cp38", "plat1"),
tags.Tag("cp38", "cp38", "plat2"),
tags.Tag("cp38", "abi3", "plat1"),
tags.Tag("cp38", "abi3", "plat2"),
tags.Tag("cp38", "none", "plat1"),
tags.Tag("cp38", "none", "plat2"),
tags.Tag("cp37", "abi3", "plat1"),
tags.Tag("cp37", "abi3", "plat2"),
tags.Tag("cp36", "abi3", "plat1"),
tags.Tag("cp36", "abi3", "plat2"),
tags.Tag("cp35", "abi3", "plat1"),
tags.Tag("cp35", "abi3", "plat2"),
tags.Tag("cp34", "abi3", "plat1"),
tags.Tag("cp34", "abi3", "plat2"),
tags.Tag("cp33", "abi3", "plat1"),
tags.Tag("cp33", "abi3", "plat2"),
tags.Tag("cp32", "abi3", "plat1"),
tags.Tag("cp32", "abi3", "plat2"),
]
result = list(tags.cpython_tags((3, 3), ["cp33m"], ["plat1", "plat2"]))
assert result == [
tags.Tag("cp33", "cp33m", "plat1"),
tags.Tag("cp33", "cp33m", "plat2"),
tags.Tag("cp33", "abi3", "plat1"),
tags.Tag("cp33", "abi3", "plat2"),
tags.Tag("cp33", "none", "plat1"),
tags.Tag("cp33", "none", "plat2"),
tags.Tag("cp32", "abi3", "plat1"),
tags.Tag("cp32", "abi3", "plat2"),
]
result = list(tags.cpython_tags((3, 13), ["cp313t"], ["plat1", "plat2"]))
assert result == [
tags.Tag("cp313", "cp313t", "plat1"),
tags.Tag("cp313", "cp313t", "plat2"),
tags.Tag("cp313", "none", "plat1"),
tags.Tag("cp313", "none", "plat2"),
]
def test_python_version_defaults(self) -> None:
tag = next(tags.cpython_tags(abis=["abi3"], platforms=["any"]))
interpreter = "cp" + tags._version_nodot(sys.version_info[:2])
assert interpreter == tag.interpreter
def test_abi_defaults(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(tags, "_cpython_abis", lambda _1, _2: ["cp38"])
result = list(tags.cpython_tags((3, 8), platforms=["any"]))
assert tags.Tag("cp38", "cp38", "any") in result
assert tags.Tag("cp38", "abi3", "any") in result
assert tags.Tag("cp38", "none", "any") in result
def test_abi_defaults_needs_underscore(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(tags, "_cpython_abis", lambda _1, _2: ["cp311"])
result = list(tags.cpython_tags((3, 11), platforms=["any"]))
assert tags.Tag("cp311", "cp311", "any") in result
assert tags.Tag("cp311", "abi3", "any") in result
assert tags.Tag("cp311", "none", "any") in result
def test_platforms_defaults(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(tags, "platform_tags", lambda: ["plat1"])
result = list(tags.cpython_tags((3, 8), abis=["whatever"]))
assert tags.Tag("cp38", "whatever", "plat1") in result
def test_platforms_defaults_needs_underscore(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(tags, "platform_tags", lambda: ["plat1"])
result = list(tags.cpython_tags((3, 11), abis=["whatever"]))
assert tags.Tag("cp311", "whatever", "plat1") in result
def test_platform_name_space_normalization(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
"""Ensure that spaces are translated to underscores in platform names."""
monkeypatch.setattr(sysconfig, "get_platform", lambda: "isilon onefs")
for tag in tags.cpython_tags():
assert " " not in tag.platform
def test_major_only_python_version(self) -> None:
result = list(tags.cpython_tags((3,), ["abi"], ["plat"]))
assert result == [
tags.Tag("cp3", "abi", "plat"),
tags.Tag("cp3", "none", "plat"),
]
def test_major_only_python_version_with_default_abis(self) -> None:
result = list(tags.cpython_tags((3,), platforms=["plat"]))
assert result == [tags.Tag("cp3", "none", "plat")]
@pytest.mark.parametrize("abis", [[], ["abi3"], ["none"]])
def test_skip_redundant_abis(self, abis: list[str]) -> None:
results = list(tags.cpython_tags((3, 0), abis=abis, platforms=["any"]))
assert results == [tags.Tag("cp30", "none", "any")]
def test_abi3_python33(self) -> None:
results = list(tags.cpython_tags((3, 3), abis=["cp33"], platforms=["plat"]))
assert results == [
tags.Tag("cp33", "cp33", "plat"),
tags.Tag("cp33", "abi3", "plat"),
tags.Tag("cp33", "none", "plat"),
tags.Tag("cp32", "abi3", "plat"),
]
def test_no_excess_abi3_python32(self) -> None:
results = list(tags.cpython_tags((3, 2), abis=["cp32"], platforms=["plat"]))
assert results == [
tags.Tag("cp32", "cp32", "plat"),
tags.Tag("cp32", "abi3", "plat"),
tags.Tag("cp32", "none", "plat"),
]
def test_no_abi3_python31(self) -> None:
results = list(tags.cpython_tags((3, 1), abis=["cp31"], platforms=["plat"]))
assert results == [
tags.Tag("cp31", "cp31", "plat"),
tags.Tag("cp31", "none", "plat"),
]
def test_no_abi3_python27(self) -> None:
results = list(tags.cpython_tags((2, 7), abis=["cp27"], platforms=["plat"]))
assert results == [
tags.Tag("cp27", "cp27", "plat"),
tags.Tag("cp27", "none", "plat"),
]
|
TestCPythonTags
|
python
|
cython__cython
|
tests/run/ass2global.py
|
{
"start": 205,
"end": 603
}
|
class ____(object):
"""
>>> global_in_class
9
>>> Test.global_in_class
Traceback (most recent call last):
AttributeError: type object 'Test' has no attribute 'global_in_class'
>>> Test().global_in_class
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute 'global_in_class'
"""
global global_in_class
global_in_class = 9
|
Test
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/polymorphic_function/concrete_function.py
|
{
"start": 14697,
"end": 37004
}
|
class ____(object):
"""Caches forward and backward functions compatible with eager gradients.
In contrast to the delayed-rewrite approach in
`_DelayedRewriteGradientFunctions` which only works with delayed execution,
the forward function generated by this class has a fixed set of outputs which
may be preserved by a tape in order to compute gradients later.
This class is abstract; its child classes differ in how many side outputs of
the forward function their backward function accepts gradients for, which
determines whether higher-order tape gradients are possible.
"""
def __init__(
self,
func_graph: func_graph_module.FuncGraph,
attrs,
func_graph_deleter,
forwardprop_input_indices,
delayed_rewrite_functions,
need_gradients_for_jvps,
):
self._func_graph = func_graph
self._forward_graph = None
self._attrs = attrs
self._forward = None
self._backward = None
self._num_outputs = len(func_graph.outputs)
self._func_graph_deleter = func_graph_deleter
self._forwardprop_input_indices = forwardprop_input_indices
self._forwardprop_output_indices = None
self._num_forwardprop_outputs = 0
self._num_inference_outputs = len(func_graph.outputs)
self._num_trainable_inference_outputs = len(
[t for t in func_graph.outputs if backprop_util.IsTrainable(t)])
self._delayed_rewrite_functions = delayed_rewrite_functions
self._need_gradients_for_jvps = need_gradients_for_jvps
def _build_functions_for_outputs(
self, outputs, inference_args, input_tangents):
"""Forward+backward functions where the backward function sees `outputs`."""
# First figure out which of `outputs` are trainable. We'll accept gradients
# for each of these in the backward function.
trainable_outputs = []
trainable_indices = []
for index, output in enumerate(outputs):
if backprop_util.IsTrainable(output):
trainable_outputs.append(output)
trainable_indices.append(index)
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
with backwards_graph.as_default():
gradients_wrt_outputs = []
for output in trainable_outputs:
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
output)
gradient_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
handle_data_util.copy_handle_data(output, gradient_placeholder)
gradients_wrt_outputs.append(gradient_placeholder)
with ops.device(None):
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
trainable_outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
if input_tangents:
# Convert IndexedSlices to dense tensors (as we do elsewhere for
# function gradients). Our C++ bindings don't know how to handle them
# currently.
gradients_wrt_inputs = nest.map_structure(
lambda x: ops.convert_to_tensor(x) if x is not None else None,
gradients_wrt_inputs)
captures_from_forward = [
c for c in backwards_graph.external_captures
if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph
]
existing_outputs = object_identity.ObjectIdentitySet(
self._func_graph.outputs)
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
self._func_graph.outputs.append(capture)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `backward_function` correspond to outputs (including
# side outputs) of `self._tape_forward_function`.
backwards_graph.inputs = (
gradients_wrt_outputs + backwards_graph.internal_captures)
backwards_graph.outputs.extend(
grad
for grad in nest.flatten(gradients_wrt_inputs, expand_composites=True)
if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
forward_function, backward_function = _create_forward_backward_with_graph(
self._attrs, self._func_graph, backwards_graph)
if not input_tangents:
# There is no need to special-case forwardprop, so we can return the
# forward+backward pair we've created without further wrapping.
return (forward_function, self._func_graph, backward_function,
# No forwardprop outputs.
None, 0)
forward_wrapper = self._wrap_forward_function_with_jvps(
forward_function, backward_function, inference_args, input_tangents)
(wrapped_backwards_graph,
forward_wrapper) = self._wrap_backward_function_with_jvp_backprop(
backward_function, gradients_wrt_outputs, forward_wrapper)
# Now that we've added new captures, we need to make sure forward outputs
# are in the same order the backward function expects them to be in:
# [inference outputs] + [jvps] + [side outputs] + [captures].
forward_wrapper = self._shuffle_forward_outputs(forward_wrapper)
(wrapped_forward_function,
wrapped_backward_function) = _create_forward_backward_with_graph(
self._attrs, forward_wrapper.graph, wrapped_backwards_graph)
if (len(inference_args) + len(input_tangents)
!= len(forward_wrapper.graph.inputs)):
raise errors.InternalError(
f"The forward graph had {len(forward_wrapper.graph.inputs)} inputs, "
f"but we expected {len(inference_args) + len(input_tangents)} "
f"({len(inference_args)} inference inputs and "
f"{len(input_tangents)} input tangents).")
return (wrapped_forward_function, forward_wrapper.graph,
wrapped_backward_function, forward_wrapper.output_indices,
len(forward_wrapper.output_tangents))
def _wrap_forward_function_with_jvps(
self, forward_function, backward_function,
inference_args, input_tangents):
"""Adds inline JVP computation to a forward function."""
forward_wrapper_graph = func_graph_module.FuncGraph(
_forward_name(self._func_graph.name))
with forward_wrapper_graph.as_default():
# Tell forward accumulators to free up space for new JVP computations,
# since one may be in the process of computing a JVP (if that computation
# triggered this function building).
#
# We'll make symbolic versions of input JVPs, run the forward function
# under forward accumulators to get symbolic output JVPs, then set those
# as outputs of the new wrapped forward function.
with forwardprop_util.push_forwardprop_state():
forward_captures = {
ops.tensor_id(internal): external
for external, internal in self._func_graph.captures}
for input_index, real_input in enumerate(self._func_graph.inputs):
# This loop is more or less equivalent to running tf.identity on each
# of self._func_graph.inputs. However, doing that also captures jvps
# for resource handles, which confuses the jvp capturing code below
# (since primal inputs are interwoven with jvp inputs).
input_placeholder = array_ops.placeholder(
dtype=real_input.dtype,
shape=real_input.shape)
capture = forward_captures.get(ops.tensor_id(real_input))
if capture is not None:
forward_wrapper_graph.add_capture(capture, input_placeholder)
if capture.dtype == dtypes.resource:
handle_data_util.copy_handle_data(capture, input_placeholder)
else:
forward_wrapper_graph.inputs.append(input_placeholder)
for inp, arg in zip(forward_wrapper_graph.inputs, inference_args):
record.record_operation(
"captured_value", [inp], [arg],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
num_inference_inputs = len(inference_args)
for tape_indices in self._forwardprop_input_indices:
for input_index, jvp_index in tape_indices:
input_placeholder = forward_wrapper_graph.inputs[input_index]
if len(forward_wrapper_graph.inputs) != jvp_index:
raise errors.InternalError(
f"Expected {jvp_index} forward graph inputs, "
f"got {len(forward_wrapper_graph.inputs)}.")
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
input_placeholder)
jvp_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
external_jvp = input_tangents[jvp_index - num_inference_inputs]
forward_wrapper_graph.add_capture(external_jvp, jvp_placeholder)
tensor_shape.TensorShape(
external_jvp.shape).assert_is_compatible_with(
jvp_placeholder.shape)
record.record_operation(
"captured_value",
[jvp_placeholder],
[external_jvp],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
forward_inputs = forward_wrapper_graph.inputs[:num_inference_inputs]
gradient_function = (
self._delayed_rewrite_functions._rewrite_forward_and_call_backward) # pylint: disable=protected-access
with ops.get_default_graph()._override_gradient_function( # pylint: disable=protected-access
{"PartitionedCall": gradient_function,
"StatefulPartitionedCall": gradient_function}):
forward_outputs = forward_function.call_flat(*forward_inputs)
if isinstance(forward_outputs, ops.Operation):
# _wrapped_backward_function expects a list, but if the function has
# no outputs its call() returns an Operation. We need to undo that
# so we don't cause problems later.
forward_outputs = []
py_backward, _ = self._wrap_backward_function(
self._func_graph, backward_function, forward_outputs)
# We will never request backward tape gradients for this operation
# directly since we're wrapping the call; forwardprop will call the
# backward function (and nested forward accumulators may build
# higher-order gradients), but any watching GradientTapes should ignore
# it.
#
# TODO(allenl): It might be better to explicitly stop backward recording
# so we don't use the second-order tape cases unnecessarily.
record.record_operation_forwardprop_only(
forward_function.cached_definition.signature.name,
forward_outputs, forward_inputs, py_backward, None)
output_indices, output_tangents = (
pywrap_tfe.TFE_Py_PackJVPs(forward_outputs))
output_tangents = [forward_wrapper_graph.capture(t)
for t in output_tangents]
return _ForwardWrapper(
graph=forward_wrapper_graph, outputs=forward_outputs,
output_indices=output_indices, output_tangents=output_tangents)
def _wrap_backward_function_with_jvp_backprop(
self, backward_function, gradients_wrt_outputs, forward_wrapper):
"""Wraps `backward_function` to include gradients for JVPs."""
wrapped_backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
with wrapped_backwards_graph.as_default():
py_backward, recorded_outputs = self._wrap_backward_function(
self._func_graph, backward_function, forward_wrapper.outputs)
trainable_index = 0
forward_doutputs = []
doutput_args = []
for output in recorded_outputs:
if backprop_util.IsTrainable(output):
doutput = gradients_wrt_outputs[trainable_index]
doutput_placeholder = graph_placeholder(doutput.dtype, doutput.shape)
doutput_args.append(doutput_placeholder)
forward_doutputs.append(doutput_placeholder)
trainable_index += 1
else:
doutput_args.append(None)
dinputs = py_backward(*doutput_args)
existing_outputs = object_identity.ObjectIdentitySet(
forward_wrapper.outputs + forward_wrapper.output_tangents)
num_processed_output_tangents = 0
gradients_wrt_output_tangents = []
tangent_doutputs = []
output_tangents = forward_wrapper.output_tangents
output_indices = forward_wrapper.output_indices
if self._need_gradients_for_jvps:
# TODO(allenl): Consider using a throwaway graph to avoid extra gradient
# evaluations; gradients for jvps may have common subgraphs.
while num_processed_output_tangents != len(output_tangents):
for output in output_tangents[num_processed_output_tangents:]:
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
output)
placeholder = graph_placeholder(gradient_dtype, gradient_shape)
gradients_wrt_output_tangents.append(placeholder)
tangent_doutputs.append(placeholder)
num_processed_output_tangents = len(output_tangents)
with ops.device(None):
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
output_tangents,
forward_wrapper.graph.inputs,
grad_ys=gradients_wrt_output_tangents,
src_graph=forward_wrapper.graph)
dinputs = [
backprop_util.AggregateIndexedSlicesGradients((existing, new))
for existing, new in zip(dinputs, gradients_wrt_inputs)
if existing is not None or new is not None]
dinputs.extend(gradients_wrt_inputs[len(dinputs):])
captures_from_forward = [
c for c in wrapped_backwards_graph.external_captures
if (not isinstance(c, ops.EagerTensor)
and c.graph is forward_wrapper.graph)]
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
forward_wrapper.outputs.append(capture)
output_indices, output_tangents = (
forwardprop_util.pack_tangents(forward_wrapper.outputs))
output_tangents = [forward_wrapper.graph.capture(t)
for t in output_tangents]
for t in output_tangents:
existing_outputs.add(t)
wrapped_backwards_graph.inputs = (
forward_doutputs[:self._num_trainable_inference_outputs]
+ tangent_doutputs
+ forward_doutputs[self._num_trainable_inference_outputs:]
+ wrapped_backwards_graph.internal_captures)
wrapped_backwards_graph.structured_outputs = dinputs
wrapped_backwards_graph.outputs = [t for t in dinputs if t is not None]
return (wrapped_backwards_graph,
forward_wrapper._replace(output_indices=output_indices,
output_tangents=output_tangents))
def _shuffle_forward_outputs(self, forward_wrapper):
"""Reorders function outputs so captures are last."""
def _index_map(original):
if original < self._num_inference_outputs:
return original
if original >= len(forward_wrapper.outputs):
return (original - len(forward_wrapper.outputs)
+ self._num_inference_outputs)
return original + len(forward_wrapper.output_tangents)
output_indices = nest.map_structure(
_index_map, forward_wrapper.output_indices)
forward_wrapper.graph.outputs = (
forward_wrapper.outputs[:self._num_inference_outputs]
+ forward_wrapper.output_tangents
+ forward_wrapper.outputs[self._num_inference_outputs:])
return forward_wrapper._replace(output_indices=output_indices)
def forward(self, inference_args, input_tangents):
"""Construct or fetch a forward function with side-outputs.
When graph building without a tape active, symbolic gradients rely on
regenerating the backward function for higher-order gradients (to account
for new side outputs of the rewritten forward function call). Thus there is
no fixed backward function for this case. However, when a tape is active
(eager or graph building), we generate fixed backward and forward functions
at forward function call time.
This difference between the tape and non-tape cases is to avoid building
unneeded backward functions while graph building (where we may or may not
eventually need gradients).
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A forward atomic_function.AtomicFunction.
"""
if self._forward is None:
(
self._forward,
self._forward_graph,
self._backward,
self._forwardprop_output_indices,
self._num_forwardprop_outputs,
) = self._forward_and_backward_functions(inference_args, input_tangents)
return self._forward
def _wrap_backward_function(
self, forward_graph: func_graph_module.FuncGraph, backward, outputs
):
"""Create a backward function given `outputs` from the forward function."""
capture_mapping = dict(
zip((ops.tensor_id(t) for t in forward_graph.outputs), outputs)
)
captured_inputs = backward.captured_inputs
remapped_captures = [
capture_mapping.get(ops.tensor_id(capture), capture)
for capture in captured_inputs
]
if any(
t.graph is forward_graph
for t in remapped_captures
if not isinstance(t, ops.EagerTensor)
):
incorrect_mapping = [
t
for t in remapped_captures
if (
not isinstance(t, ops.EagerTensor)
and t.graph is not forward_graph
)
]
raise errors.InternalError(
"Failed to map all backward graph captures to "
"the forward graph. Incorrectly mapped: "
f"{incorrect_mapping}."
)
# We may need to use zeros_like to get a zero for variant Tensors with
# unconnected gradients. We do that in advance so we don't have to hold on
# to the outputs themselves, which may not be needed otherwise.
variant_zeros_like = {}
backward_function_inputs = len(backward.inputs) - len(captured_inputs)
recorded_outputs = []
trainable_recorded_outputs = 0
skip_positions = []
if self._num_forwardprop_outputs and not self._need_gradients_for_jvps:
relevant_outputs = (
outputs[: self._num_inference_outputs]
+ outputs[
self._num_inference_outputs + self._num_forwardprop_outputs :
]
)
else:
relevant_outputs = outputs
for output_index, output in enumerate(relevant_outputs):
if trainable_recorded_outputs < backward_function_inputs:
recorded_outputs.append(output)
if backprop_util.IsTrainable(output):
trainable_recorded_outputs += 1
else:
skip_positions.append(output_index)
if output.dtype == dtypes.variant:
variant_zeros_like[output_index] = default_gradient.zeros_like(output)
def _backward_function_wrapper(*args):
"""Process output gradients and call the backward function."""
if not backward.outputs:
return backward.structured_outputs
processed_args = []
input_index = 0
for output_index, arg in enumerate(args):
# Convert IndexedSlices to dense tensors. The IndexedSlices optimization
# is only really effective when doing tf.gather(variable) as the
# adjoint functions for most operations are unlikely to preserve the
# sparsity in IndexedSlices.
if isinstance(arg, indexed_slices.IndexedSlices):
arg = ops.convert_to_tensor(arg)
if output_index in skip_positions:
continue
if arg is None:
# We're calling a (non-polymorphic) ConcreteFunction, so we need to
# have a Tensor value for each Tensor we thought would be trainable
# based on its dtype, even if it ended up being unconnected.
input_placeholder = backward.inputs[
input_index]
if input_placeholder.dtype == dtypes.variant:
arg = variant_zeros_like[output_index]
else:
arg = array_ops.zeros(
*default_gradient.shape_and_dtype(input_placeholder))
processed_args.append(arg)
input_index += 1
if input_index >= backward_function_inputs:
break
return backward._call_flat( # pylint: disable=protected-access
processed_args, remapped_captures)
return _backward_function_wrapper, recorded_outputs
def record(self, flat_outputs, inference_args, input_tangents):
"""Record the function call operation.
For backprop, indicates the backward function to use and which new Tensors
must be watched. For forwardprop from eager, the function call itself will
have produced tangents which need to be recorded.
Args:
flat_outputs: The result of running `forward`.
inference_args: A flat list of Tensors with inference inputs to the
operation.
input_tangents: A flat list of Tensors with input tangents consumed by the
operation.
"""
backward_function, to_record = self._wrap_backward_function(
self._forward_graph, self._backward, flat_outputs
)
if self._forwardprop_output_indices:
record.record_operation_backprop_only(
self._forward.cached_definition.signature.name,
to_record,
inference_args,
backward_function,
)
record.record_operation_forwardprop_only(
self._forward.cached_definition.signature.name,
flat_outputs,
inference_args + input_tangents,
backward_function,
self._forwardprop_output_indices,
)
else:
record.record_operation(
self._forward.cached_definition.signature.name,
to_record,
inference_args + input_tangents,
backward_function,
)
|
_TapeGradientFunctions
|
python
|
celery__celery
|
t/integration/test_mem_leak_in_exception_handling.py
|
{
"start": 347,
"end": 8807
}
|
class ____:
"""Test class for memory leak scenarios with unhandled exceptions."""
def __init__(self):
self.app = Celery('test_memory_leak')
self.app.conf.update(
broker_url='memory://',
result_backend='cache+memory://',
task_always_eager=True,
task_eager_propagates=True,
task_store_eager_result=True,
)
self.setup_tasks()
def setup_tasks(self):
"""Setup test tasks."""
@self.app.task
def task_success():
"""Task that completes successfully - baseline for memory comparison."""
return "success"
@self.app.task
def task_unhandled_exception():
"""Task that raises an unhandled RuntimeError exception."""
raise RuntimeError("Unhandled exception for memory leak test")
@self.app.task(bind=True, max_retries=3)
def task_retry_then_fail(self):
"""Task that retries multiple times and eventually fails with unhandled exception."""
if self.request.retries < self.max_retries:
raise self.retry(countdown=0.001)
raise RuntimeError("Final retry failure - unhandled exception")
@self.app.task
def task_nested_exception_stack():
"""Task that raises exception through deeply nested function calls."""
def deep_level_5():
local_data = {"level": 5, "data": list(range(100))} # noqa: F841
raise ValueError("Deep nested exception at level 5")
def deep_level_4():
local_data = {"level": 4, "nested": {"data": list(range(50))}} # noqa: F841
deep_level_5()
def deep_level_3():
local_data = [1, 2, 3, {"nested": True}] # noqa: F841
deep_level_4()
def deep_level_2():
deep_level_3()
def deep_level_1():
deep_level_2()
deep_level_1()
self.task_success = task_success
self.task_unhandled_exception = task_unhandled_exception
self.task_retry_then_fail = task_retry_then_fail
self.task_nested_exception_stack = task_nested_exception_stack
def get_memory_usage():
"""
Get current memory usage in bytes.
Returns RSS (total process memory) if psutil is available,
otherwise returns Python heap allocations via tracemalloc.
Note: These measurements are not directly comparable.
"""
try:
import psutil
process = psutil.Process(os.getpid())
return process.memory_info().rss
except ImportError:
# Fallback to tracemalloc if psutil not available
current, peak = tracemalloc.get_traced_memory()
return current
def test_mem_leak_unhandled_exceptions():
"""Test that reproduces the memory leak when tasks raise unhandled exceptions."""
# Setup
test_instance = MemoryLeakUnhandledExceptionsTest()
# Enable memory tracing
tracemalloc.start()
# Warm up - run some successful tasks first
for _ in range(50):
try:
test_instance.task_success.apply()
except Exception:
pass
# Force garbage collection and get baseline memory
gc.collect()
baseline_memory = get_memory_usage()
# Run many failing tasks - this should demonstrate the leak
exception_count = 0
for _ in range(500): # Reduced from 1000 to make test faster
try:
test_instance.task_unhandled_exception.apply()
except Exception:
exception_count += 1
# Force garbage collection
gc.collect()
after_exceptions_memory = get_memory_usage()
# Run successful tasks again to ensure the leak is from exceptions
for _ in range(50):
try:
test_instance.task_success.apply()
except Exception:
pass
gc.collect()
final_memory = get_memory_usage()
# Calculate memory increase
memory_increase = after_exceptions_memory - baseline_memory
# Stop tracing
tracemalloc.stop()
# Log memory statistics for debugging
logger.debug("--- Memory Statistics ---") # Separator for better readability
logger.debug(f"Baseline memory: {baseline_memory / 1024 / 1024:.2f} MB")
logger.debug(f"After exceptions: {after_exceptions_memory / 1024 / 1024:.2f} MB")
logger.debug(f"Final memory: {final_memory / 1024 / 1024:.2f} MB")
logger.debug(f"Memory increase: {memory_increase / 1024 / 1024:.2f} MB")
logger.debug(f"Exceptions processed: {exception_count}")
# The test should demonstrate a significant memory increase
# This threshold may need adjustment based on the system
memory_increase_mb = memory_increase / 1024 / 1024
# Verify the memory leak is fixed - memory increase should be minimal
# Before fix: >70MB for 1000 tasks (~70KB/task)
# After fix: <5MB for 500 tasks (<10KB/task)
threshold_percent = float(os.getenv("MEMORY_LEAK_THRESHOLD_PERCENT", 10)) # Default: 10% increase
memory_threshold_mb = baseline_memory / 1024 / 1024 * (threshold_percent / 100)
assert memory_increase_mb < memory_threshold_mb, (
f"Memory leak still exists! Expected <{memory_threshold_mb:.2f}MB increase "
f"(based on {threshold_percent}% of baseline), "
f"but got {memory_increase_mb:.2f}MB. "
f"This indicates the memory leak fix is not working properly."
)
def test_mem_leak_retry_failures():
"""Test memory leak with task retry and eventual failure scenarios."""
test_instance = MemoryLeakUnhandledExceptionsTest()
# Enable memory tracing
tracemalloc.start()
# Get baseline
gc.collect()
baseline_memory = get_memory_usage()
# Run tasks that retry and eventually fail
for _ in range(100): # Fewer iterations since retries are expensive
try:
test_instance.task_retry_then_fail.apply()
except Exception:
pass
gc.collect()
after_retries_memory = get_memory_usage()
# Stop tracing
tracemalloc.stop()
# Calculate memory increase
memory_increase = after_retries_memory - baseline_memory
memory_increase_mb = memory_increase / 1024 / 1024
logger.debug("") # New line for better readability
logger.debug(f"Baseline memory: {baseline_memory / 1024 / 1024:.2f} MB")
logger.debug(f"After retries: {after_retries_memory / 1024 / 1024:.2f} MB")
logger.debug(f"Memory increase: {memory_increase_mb:.2f} MB")
# Retries should not show significant memory increase if fix is working
assert memory_increase_mb < 3, (
f"Memory leak in retry scenarios! Expected <3MB increase for 100 retry tasks, "
f"but got {memory_increase_mb:.2f}MB"
)
def test_mem_leak_nested_exception_stacks():
    """Test memory leak with deeply nested exception stacks and local variables."""
    test_instance = MemoryLeakUnhandledExceptionsTest()
    # Trace allocations for the duration of the run and start from a clean heap.
    tracemalloc.start()
    gc.collect()
    baseline_memory = get_memory_usage()
    # Each task raises through a nested call stack; the exception is swallowed here.
    for _ in range(200):
        try:
            test_instance.task_nested_exception_stack.apply()
        except Exception:
            pass
    gc.collect()
    after_complex_memory = get_memory_usage()
    tracemalloc.stop()
    # Growth relative to the pre-run baseline, in megabytes.
    memory_increase_mb = (after_complex_memory - baseline_memory) / 1024 / 1024
    logger.debug("Memory usage results:")
    logger.debug(f"Baseline memory: {baseline_memory / 1024 / 1024:.2f} MB")
    logger.debug(f"After complex exceptions: {after_complex_memory / 1024 / 1024:.2f} MB")
    logger.debug(f"Memory increase: {memory_increase_mb:.2f} MB")
    # Complex exceptions should not show significant memory increase if fix is working
    assert memory_increase_mb < 4, (
        f"Memory leak in nested exception scenarios! Expected <4MB increase for 200 nested tasks, "
        f"but got {memory_increase_mb:.2f}MB"
    )
if __name__ == "__main__":
    # Allow running these tests standalone for debugging
    print("Running memory leak integration tests...")
    for _standalone_test in (
        test_mem_leak_unhandled_exceptions,
        test_mem_leak_retry_failures,
        test_mem_leak_nested_exception_stacks,
    ):
        _standalone_test()
    print("Memory leak integration tests completed")
|
MemoryLeakUnhandledExceptionsTest
|
python
|
huggingface__transformers
|
src/transformers/models/aria/modeling_aria.py
|
{
"start": 28365,
"end": 31510
}
|
class ____(AriaTextPreTrainedModel):
    """Aria text decoder stack: token embedding, decoder layers, final RMS norm,
    and shared rotary position embeddings."""

    def __init__(self, config: AriaTextConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = AriaTextRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
        # Lazily create a KV cache when caching is requested but none was passed in.
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            # Positions of the new tokens, offset by how many tokens the cache
            # already holds.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        # Rotary position embeddings are computed once and shared by all layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring
|
AriaTextModel
|
python
|
getsentry__sentry
|
src/sentry/dynamic_sampling/tasks/boost_low_volume_transactions.py
|
{
"start": 2227,
"end": 2472
}
|
class ____(ProjectIdentity, total=True):
    """
    Information about the project transactions.

    Extends ProjectIdentity with per-transaction volume data.
    """

    # (transaction_name, count) pairs observed for the project.
    transaction_counts: list[tuple[str, float]]
    # Total number of transaction events, when available.
    total_num_transactions: float | None
    # Total number of distinct transaction names, when available.
    total_num_classes: int | None
|
ProjectTransactions
|
python
|
encode__django-rest-framework
|
tests/test_fields.py
|
{
"start": 50866,
"end": 51459
}
|
class ____(FieldValues, TestCase):
    """
    Valid and invalid values for `DateTimeField` with naive datetimes
    (`default_timezone=None`).
    """

    # Aware inputs are converted to naive; date-time strings parse as naive.
    valid_inputs = {
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc): datetime.datetime(2001, 1, 1, 13, 00),
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
    }
    invalid_inputs = {}
    # Both naive and aware inputs serialize to the same naive ISO 8601 string.
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=utc): '2001-01-01T13:00:00',
    }
    field = serializers.DateTimeField(default_timezone=None)
|
TestNaiveDateTimeField
|
python
|
getsentry__sentry
|
tests/sentry/models/test_file.py
|
{
"start": 526,
"end": 2502
}
|
class ____(TestCase):
    def test_from_file(self) -> None:
        """Uploading identical content twice dedupes to a single FileBlob."""
        fileobj = ContentFile(b"foo bar")
        my_file1 = FileBlob.from_file(fileobj)
        assert my_file1.path
        # Re-read the same bytes; from_file should resolve to the existing blob.
        fileobj.seek(0)
        my_file2 = FileBlob.from_file(fileobj)
        # deep check
        assert my_file1.id == my_file2.id
        assert my_file1.checksum == my_file2.checksum
        assert my_file1.path == my_file2.path
    def test_generate_unique_path(self) -> None:
        """Generated paths have three segments of 2/4/26 characters."""
        path = FileBlob.generate_unique_path()
        assert path
        parts = path.split("/")
        assert len(parts) == 3
        assert list(map(len, parts)) == [2, 4, 26]
        # Check uniqueness
        path2 = FileBlob.generate_unique_path()
        assert path != path2
    @patch.object(FileBlob, "_delete_file_task")
    def test_delete_handles_database_error(self, mock_task_factory: MagicMock) -> None:
        """Filestore cleanup is still queued when the DB delete fails."""
        fileobj = ContentFile(b"foo bar")
        baz_file = File.objects.create(name="baz-v1.js", type="default", size=7)
        baz_file.putfile(fileobj)
        blob = baz_file.blobs.all()[0]
        mock_delete_file_region = Mock()
        mock_task_factory.return_value = mock_delete_file_region
        # Simulate the database dying mid-delete.
        with patch("sentry.models.file.super") as mock_super:
            mock_super.side_effect = DatabaseError("server closed connection")
            with self.tasks(), pytest.raises(DatabaseError):
                blob.delete()
        # Even though postgres failed we should still queue
        # a task to delete the filestore object.
        assert mock_delete_file_region.delay.call_count == 1
        # blob is still around.
        assert FileBlob.objects.get(id=blob.id)
    def test_dedupe_works_with_cache(self) -> None:
        """Blob dedupe still applies after an earlier from_file() warmed state."""
        contents = ContentFile(b"foo bar")
        FileBlob.from_file(contents)
        contents.seek(0)
        file_1 = File.objects.create(name="foo")
        file_1.putfile(contents)
        assert FileBlob.objects.count() == 1
|
FileBlobTest
|
python
|
mitmproxy__pdoc
|
test/testdata/type_stubs/_utils.py
|
{
"start": 0,
"end": 100
}
|
class ____:
    # Fixture for the type-stub tests: the docstring text itself is part of the
    # expected behavior (a .pyi stub's docstring should take precedence), so it
    # must not be changed.
    """Docstring from imported py file - ideally this should be overridden."""
|
ImportedClass
|
python
|
more-itertools__more-itertools
|
tests/test_recipes.py
|
{
"start": 7080,
"end": 7290
}
|
class ____(TestCase):
    """Tests for ``dotproduct()``"""

    def test_happy_path(self):
        """simple dotproduct example"""
        # 10*20 + 10*20 == 400
        self.assertEqual(400, mi.dotproduct([10, 10], [20, 20]))
|
DotproductTests
|
python
|
python-excel__xlrd
|
xlrd/xldate.py
|
{
"start": 1165,
"end": 1226
}
|
class ____(XLDateError):
    # Raised for serial dates before the spreadsheet epoch.
    "``xldate < 0.00``"
|
XLDateNegative
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
|
{
"start": 527,
"end": 2218
}
|
class ____(Formatter):
    """
    Format tokens as Pango Markup code. It can then be rendered to an SVG.
    .. versionadded:: 2.9
    """
    name = 'Pango Markup'
    aliases = ['pango', 'pangomarkup']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # Precompute (opening, closing) markup for every styled token type.
        self.styles = {}
        for token, style in self.style:
            opening = ''
            closing = ''
            if style['color']:
                opening += '<span fgcolor="#{}">'.format(style['color'])
                closing = '</span>' + closing
            # Each boolean attribute maps to a simple tag pair; closing tags are
            # prepended so they nest in reverse order of the opening tags.
            for attr, open_tag, close_tag in (
                ('bold', '<b>', '</b>'),
                ('italic', '<i>', '</i>'),
                ('underline', '<u>', '</u>'),
            ):
                if style[attr]:
                    opening += open_tag
                    closing = close_tag + closing
            self.styles[token] = (opening, closing)

    def format_unencoded(self, tokensource, outfile):
        # Coalesce consecutive tokens of the same type into one styled span.
        pending = ''
        pending_type = None

        def flush():
            if pending:
                opening, closing = self.styles[pending_type]
                outfile.write(opening + pending + closing)

        outfile.write('<tt>')
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled ancestor is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == pending_type:
                pending += escape_special_chars(value)
            else:
                flush()
                pending = escape_special_chars(value)
                pending_type = ttype
        flush()
        outfile.write('</tt>')
|
PangoMarkupFormatter
|
python
|
python-openxml__python-docx
|
src/docx/oxml/simpletypes.py
|
{
"start": 10466,
"end": 10736
}
|
class ____(XsdLong):
    """XML simple type for a coordinate restricted to non-negative EMU values."""

    @classmethod
    def convert_from_xml(cls, str_value: str) -> Length:
        # Attribute values are integer EMU; wrap in Emu for unit-aware Length.
        return Emu(int(str_value))

    @classmethod
    def validate(cls, value: Any) -> None:
        # Upper bound mirrors the schema's maximum positive coordinate in EMU.
        cls.validate_int_in_range(value, 0, 27273042316900)
|
ST_PositiveCoordinate
|
python
|
google__jax
|
jax/_src/pallas/mosaic_gpu/core.py
|
{
"start": 48257,
"end": 48891
}
|
class ____(effects.Effect):
    # Marker effect for WGMMA pipelining; registered below as allowed inside
    # control flow.
    pass


effects.control_flow_allowed_effects.add_type(_WGMMAPipelineEffect)
# Singleton instance attached to primitives that participate in the pipeline.
_wgmma_pipeline_effect = _WGMMAPipelineEffect()
# We define the layout_cast primitive here, because it needs to be available in
# the lowering code (to provide layout hints to the rules).
layout_cast_p = jax_core.Primitive("layout_cast")


@layout_cast_p.def_abstract_eval
def _layout_cast_abstract_eval(x, new_layout):
    # A layout cast does not change the abstract value (shape/dtype) of x.
    del new_layout  # Unused.
    return x


def layout_cast(x: Any, new_layout: SomeLayout):
    """Casts the layout of the given array."""
    return layout_cast_p.bind(x, new_layout=new_layout)
|
_WGMMAPipelineEffect
|
python
|
huggingface__transformers
|
src/transformers/models/sam3/modeling_sam3.py
|
{
"start": 1776,
"end": 2594
}
|
class ____(ModelOutput):
    r"""
    fpn_hidden_states (`tuple[torch.FloatTensor]`):
        Tuple of multi-level FPN feature maps.
    fpn_position_encoding (`tuple[torch.FloatTensor]`):
        Tuple of position encodings for each FPN level.
    hidden_states (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of hidden states from all ViT layers.
    attentions (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of attention weights from all ViT layers.
    """

    # All fields default to None so the output can be partially populated;
    # annotations made Optional to match the None defaults.
    last_hidden_state: Optional[torch.FloatTensor] = None
    fpn_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    fpn_position_encoding: Optional[tuple[torch.FloatTensor, ...]] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
|
Sam3VisionEncoderOutput
|
python
|
ray-project__ray
|
python/ray/_private/accelerators/nvidia_gpu.py
|
{
"start": 513,
"end": 4087
}
|
class ____(AcceleratorManager):
    """NVIDIA GPU accelerators."""
    @staticmethod
    def get_resource_name() -> str:
        # Ray resource name under which NVIDIA GPUs are scheduled.
        return "GPU"
    @staticmethod
    def get_visible_accelerator_ids_env_var() -> str:
        return CUDA_VISIBLE_DEVICES_ENV_VAR
    @staticmethod
    def get_current_process_visible_accelerator_ids() -> Optional[List[str]]:
        # None -> env var unset (all devices visible);
        # [] -> explicitly no devices ("" or the legacy "NoDevFiles" value).
        cuda_visible_devices = os.environ.get(
            NvidiaGPUAcceleratorManager.get_visible_accelerator_ids_env_var(), None
        )
        if cuda_visible_devices is None:
            return None
        if cuda_visible_devices == "":
            return []
        if cuda_visible_devices == "NoDevFiles":
            return []
        return list(cuda_visible_devices.split(","))
    @staticmethod
    def get_current_node_num_accelerators() -> int:
        # Number of NVIDIA GPUs on this node, or 0 when NVML is unavailable.
        import ray._private.thirdparty.pynvml as pynvml
        try:
            pynvml.nvmlInit()
        except pynvml.NVMLError:
            return 0  # pynvml init failed
        device_count = pynvml.nvmlDeviceGetCount()
        pynvml.nvmlShutdown()
        return device_count
    @staticmethod
    def get_current_node_accelerator_type() -> Optional[str]:
        # Accelerator type derived from the first GPU's device name
        # (assumes a homogeneous node); None when NVML is unavailable.
        import ray._private.thirdparty.pynvml as pynvml
        try:
            pynvml.nvmlInit()
        except pynvml.NVMLError:
            return None  # pynvml init failed
        device_count = pynvml.nvmlDeviceGetCount()
        cuda_device_type = None
        if device_count > 0:
            handle = pynvml.nvmlDeviceGetHandleByIndex(0)
            device_name = pynvml.nvmlDeviceGetName(handle)
            if isinstance(device_name, bytes):
                # Some pynvml versions return bytes rather than str.
                device_name = device_name.decode("utf-8")
            cuda_device_type = (
                NvidiaGPUAcceleratorManager._gpu_name_to_accelerator_type(device_name)
            )
        pynvml.nvmlShutdown()
        return cuda_device_type
    @staticmethod
    def _gpu_name_to_accelerator_type(name):
        # Extract the accelerator type from the full device name via the
        # module-level NVIDIA pattern; None when it does not match.
        if name is None:
            return None
        match = NVIDIA_GPU_NAME_PATTERN.match(name)
        return match.group(1) if match else None
    @staticmethod
    def validate_resource_request_quantity(
        quantity: float,
    ) -> Tuple[bool, Optional[str]]:
        # Any (including fractional) GPU quantity is acceptable.
        return (True, None)
    @staticmethod
    def set_current_process_visible_accelerator_ids(
        visible_cuda_devices: List[str],
    ) -> None:
        # Honor the user's opt-out of Ray managing the CUDA visibility env var.
        if os.environ.get(NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):
            return
        os.environ[
            NvidiaGPUAcceleratorManager.get_visible_accelerator_ids_env_var()
        ] = ",".join([str(i) for i in visible_cuda_devices])
    @staticmethod
    def get_ec2_instance_num_accelerators(
        instance_type: str, instances: dict
    ) -> Optional[int]:
        # GPU count from the EC2 instance catalog; None when the type is
        # unknown or carries no GPU info.
        if instance_type not in instances:
            return None
        gpus = instances[instance_type].get("GpuInfo", {}).get("Gpus")
        if gpus is not None:
            # TODO(ameer): currently we support one gpu type per node.
            assert len(gpus) == 1
            return gpus[0]["Count"]
        return None
    @staticmethod
    def get_ec2_instance_accelerator_type(
        instance_type: str, instances: dict
    ) -> Optional[str]:
        # GPU model name from the EC2 instance catalog; None when the type is
        # unknown or carries no GPU info.
        if instance_type not in instances:
            return None
        gpus = instances[instance_type].get("GpuInfo", {}).get("Gpus")
        if gpus is not None:
            # TODO(ameer): currently we support one gpu type per node.
            assert len(gpus) == 1
            return gpus[0]["Name"]
        return None
|
NvidiaGPUAcceleratorManager
|
python
|
ethereum__web3.py
|
web3/_utils/events.py
|
{
"start": 12104,
"end": 12775
}
|
class ____(BaseEventFilterBuilder):
    def deploy(self, w3: "Web3") -> "LogFilter":
        """Create the eth log filter on *w3* and freeze this builder.

        Raises Web3ValueError if *w3* is not a web3.Web3 instance.
        """
        if not isinstance(w3, web3.Web3):
            raise Web3ValueError(f"Invalid web3 argument: got: {w3!r}")
        # Freeze the builder and its arguments so the deployed filter cannot
        # drift from the params it was created with.
        for arg in self.args.values():
            arg._immutable = True
        self._immutable = True
        log_filter = cast("LogFilter", w3.eth.filter(self.filter_params))
        # Attach params, manual data filters, and this builder to the returned
        # LogFilter so entries can be matched and formatted client-side.
        log_filter.filter_params = self.filter_params
        log_filter.set_data_filters(self.data_argument_values)
        log_filter.builder = self
        if self.formatter is not None:
            log_filter.log_entry_formatter = self.formatter
        return log_filter
|
EventFilterBuilder
|
python
|
getsentry__sentry
|
src/sentry/models/groupmeta.py
|
{
"start": 433,
"end": 2801
}
|
class ____(BaseManager["GroupMeta"]):
    """Manager that serves GroupMeta reads from a per-thread cache.

    populate_cache() must be called before get_value()/get_value_bulk();
    otherwise GroupMetaCacheNotPopulated is raised.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One cache dict per thread.
        self.__local_cache = threading.local()
    def __getstate__(self):
        # threading.local cannot be pickled; drop it here and rebuild in
        # __setstate__. NOTE: the mangled key assumes the original class name.
        d = self.__dict__.copy()
        d.pop("_GroupMetaManager__local_cache", None)
        return d
    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__local_cache = threading.local()
    def _get_cache(self):
        # Lazily create the current thread's dict on first access.
        if not hasattr(self.__local_cache, "value"):
            self.__local_cache.value = {}
        return self.__local_cache.value
    def _set_cache(self, value):
        self.__local_cache.value = value
    # Property indirection so self.__cache always targets the current thread.
    __cache = property(_get_cache, _set_cache)
    def contribute_to_class(self, model, name):
        super().contribute_to_class(model, name)
        # Reset the cache at the end of every request to avoid stale reads.
        request_finished.connect(self.clear_local_cache)
    def clear_local_cache(self, **kwargs):
        self.__cache = {}
    def populate_cache(self, instance_list):
        """Bulk-load key/value pairs for the given groups into the cache."""
        # Seed empty entries so cache hits are distinguishable from "not populated".
        for group in instance_list:
            self.__cache.setdefault(group.id, {})
        results = self.filter(group__in=instance_list).values_list("group", "key", "value")
        for group_id, key, value in results:
            self.__cache[group_id][key] = value
    def get_value_bulk(self, instance_list, key, default=None):
        """Return {instance: value} for *key*; requires a populated cache."""
        results = {}
        for instance in instance_list:
            try:
                inst_cache = self.__cache[instance.id]
            except KeyError:
                raise GroupMetaCacheNotPopulated(ERR_CACHE_MISSING % (instance.id,))
            results[instance] = inst_cache.get(key, default)
        return results
    def get_value(self, instance, key, default=None):
        """Return the cached value for (instance, key); requires a populated cache."""
        try:
            inst_cache = self.__cache[instance.id]
        except KeyError:
            raise GroupMetaCacheNotPopulated(ERR_CACHE_MISSING % (instance.id,))
        return inst_cache.get(key, default)
    def unset_value(self, instance, key):
        """Delete the row and evict it from the cache (if present)."""
        self.filter(group=instance, key=key).delete()
        try:
            del self.__cache[instance.id][key]
        except KeyError:
            pass
    def set_value(self, instance, key, value):
        """Upsert the row and write through to the cache."""
        self.create_or_update(group=instance, key=key, values={"value": value})
        self.__cache.setdefault(instance.id, {})
        self.__cache[instance.id][key] = value
@region_silo_model
|
GroupMetaManager
|
python
|
sqlalchemy__sqlalchemy
|
examples/versioned_rows/versioned_rows_w_versionid.py
|
{
"start": 967,
"end": 2717
}
|
class ____:
    """Mixin giving a mapped class a composite (id, version_id) primary key
    and copy-on-write versioning via new_version()."""

    # we have a composite primary key consisting of "id"
    # and "version_id"
    id = Column(Integer, primary_key=True)
    version_id = Column(Integer, primary_key=True, default=1)
    # optional - add a persisted is_current_version column
    is_current_version = Column(Boolean, default=True)
    # optional - add a calculated is_current_version column
    @classmethod
    def __declare_last__(cls):
        # True only for the row carrying the highest version_id for this id.
        alias = cls.__table__.alias()
        cls.calc_is_current_version = column_property(
            select(func.max(alias.c.version_id) == cls.version_id).where(
                alias.c.id == cls.id
            )
        )
    def new_version(self, session):
        """Turn this persistent instance into a new row with version_id + 1."""
        # optional - set previous version to have is_current_version=False
        old_id = self.id
        session.query(self.__class__).filter_by(id=old_id).update(
            values=dict(is_current_version=False), synchronize_session=False
        )
        # make us transient (removes persistent
        # identity).
        make_transient(self)
        # increment version_id, which means we have a new PK.
        self.version_id += 1
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    """On flush, convert every modified persistent Versioned instance into a
    brand-new row via new_version()."""
    for instance in session.dirty:
        if not isinstance(instance, Versioned):
            continue
        # Skip instances with no real changes and those never persisted.
        if not session.is_modified(instance):
            continue
        if not attributes.instance_state(instance).has_identity:
            continue
        # make it transient
        instance.new_version(session)
        # re-add
        session.add(instance)
# Module-level demo setup: in-memory SQLite with SQL echo enabled.
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
# example 1, simple versioning
|
Versioned
|
python
|
kamyu104__LeetCode-Solutions
|
Python/integer-break.py
|
{
"start": 1706,
"end": 2115
}
|
class ____(object):
    def integerBreak(self, n):
        """
        :type n: int
        :rtype: int

        Break n into at least two positive integers and return the maximum
        product of those parts. O(n) time, O(1) space: only the last four DP
        values are kept in a rolling buffer. Uses range() instead of the
        Python-2-only xrange() so the code runs on both Python 2 and 3;
        behavior is identical.
        """
        if n < 4:
            # 2 -> 1+1 and 3 -> 1+2: the mandatory split caps the product at n-1.
            return n - 1
        # integerBreak(n) = max(integerBreak(n - 2) * 2, integerBreak(n - 3) * 3)
        res = [0, 1, 2, 3]
        for i in range(4, n + 1):
            res[i % 4] = max(res[(i - 2) % 4] * 2, res[(i - 3) % 4] * 3)
        return res[n % 4]
|
Solution2
|
python
|
gevent__gevent
|
src/greentest/3.14/test_urllib2.py
|
{
"start": 11656,
"end": 11804
}
|
class ____:
    """Minimal file-object stand-in whose operations are all no-ops."""
    def read(self, count=None):
        pass
    def readline(self, count=None):
        pass
    def close(self):
        pass
|
MockFile
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 171238,
"end": 171401
}
|
class ____(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    # Runs the generic sendmsg stream tests over an AF_UNIX stream socket.
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
|
SendmsgUnixStreamTest
|
python
|
pypa__setuptools
|
setuptools/_vendor/typeguard/_transformer.py
|
{
"start": 15816,
"end": 44937
}
|
class ____(NodeTransformer):
def __init__(
self, target_path: Sequence[str] | None = None, target_lineno: int | None = None
) -> None:
self._target_path = tuple(target_path) if target_path else None
self._memo = self._module_memo = TransformMemo(None, None, ())
self.names_used_in_annotations: set[str] = set()
self.target_node: FunctionDef | AsyncFunctionDef | None = None
self.target_lineno = target_lineno
def generic_visit(self, node: AST) -> AST:
has_non_empty_body_initially = bool(getattr(node, "body", None))
initial_type = type(node)
node = super().generic_visit(node)
if (
type(node) is initial_type
and has_non_empty_body_initially
and hasattr(node, "body")
and not node.body
):
# If we have still the same node type after transformation
# but we've optimised it's body away, we add a `pass` statement.
node.body = [Pass()]
return node
@contextmanager
def _use_memo(
self, node: ClassDef | FunctionDef | AsyncFunctionDef
) -> Generator[None, Any, None]:
new_memo = TransformMemo(node, self._memo, self._memo.path + (node.name,))
old_memo = self._memo
self._memo = new_memo
if isinstance(node, (FunctionDef, AsyncFunctionDef)):
new_memo.should_instrument = (
self._target_path is None or new_memo.path == self._target_path
)
if new_memo.should_instrument:
# Check if the function is a generator function
detector = GeneratorDetector()
detector.visit(node)
# Extract yield, send and return types where possible from a subscripted
# annotation like Generator[int, str, bool]
return_annotation = deepcopy(node.returns)
if detector.contains_yields and new_memo.name_matches(
return_annotation, *generator_names
):
if isinstance(return_annotation, Subscript):
annotation_slice = return_annotation.slice
# Python < 3.9
if isinstance(annotation_slice, Index):
annotation_slice = (
annotation_slice.value # type: ignore[attr-defined]
)
if isinstance(annotation_slice, Tuple):
items = annotation_slice.elts
else:
items = [annotation_slice]
if len(items) > 0:
new_memo.yield_annotation = self._convert_annotation(
items[0]
)
if len(items) > 1:
new_memo.send_annotation = self._convert_annotation(
items[1]
)
if len(items) > 2:
new_memo.return_annotation = self._convert_annotation(
items[2]
)
else:
new_memo.return_annotation = self._convert_annotation(
return_annotation
)
if isinstance(node, AsyncFunctionDef):
new_memo.is_async = True
yield
self._memo = old_memo
def _get_import(self, module: str, name: str) -> Name:
memo = self._memo if self._target_path else self._module_memo
return memo.get_import(module, name)
@overload
def _convert_annotation(self, annotation: None) -> None: ...
@overload
def _convert_annotation(self, annotation: expr) -> expr: ...
def _convert_annotation(self, annotation: expr | None) -> expr | None:
if annotation is None:
return None
# Convert PEP 604 unions (x | y) and generic built-in collections where
# necessary, and undo forward references
new_annotation = cast(expr, AnnotationTransformer(self).visit(annotation))
if isinstance(new_annotation, expr):
new_annotation = ast.copy_location(new_annotation, annotation)
# Store names used in the annotation
names = {node.id for node in walk(new_annotation) if isinstance(node, Name)}
self.names_used_in_annotations.update(names)
return new_annotation
def visit_Name(self, node: Name) -> Name:
self._memo.local_names.add(node.id)
return node
def visit_Module(self, node: Module) -> Module:
self._module_memo = self._memo = TransformMemo(node, None, ())
self.generic_visit(node)
self._module_memo.insert_imports(node)
fix_missing_locations(node)
return node
def visit_Import(self, node: Import) -> Import:
for name in node.names:
self._memo.local_names.add(name.asname or name.name)
self._memo.imported_names[name.asname or name.name] = name.name
return node
def visit_ImportFrom(self, node: ImportFrom) -> ImportFrom:
for name in node.names:
if name.name != "*":
alias = name.asname or name.name
self._memo.local_names.add(alias)
self._memo.imported_names[alias] = f"{node.module}.{name.name}"
return node
def visit_ClassDef(self, node: ClassDef) -> ClassDef | None:
self._memo.local_names.add(node.name)
# Eliminate top level classes not belonging to the target path
if (
self._target_path is not None
and not self._memo.path
and node.name != self._target_path[0]
):
return None
with self._use_memo(node):
for decorator in node.decorator_list.copy():
if self._memo.name_matches(decorator, "typeguard.typechecked"):
# Remove the decorator to prevent duplicate instrumentation
node.decorator_list.remove(decorator)
# Store any configuration overrides
if isinstance(decorator, Call) and decorator.keywords:
self._memo.configuration_overrides.update(
{kw.arg: kw.value for kw in decorator.keywords if kw.arg}
)
self.generic_visit(node)
return node
def visit_FunctionDef(
self, node: FunctionDef | AsyncFunctionDef
) -> FunctionDef | AsyncFunctionDef | None:
"""
Injects type checks for function arguments, and for a return of None if the
function is annotated to return something else than Any or None, and the body
ends without an explicit "return".
"""
self._memo.local_names.add(node.name)
# Eliminate top level functions not belonging to the target path
if (
self._target_path is not None
and not self._memo.path
and node.name != self._target_path[0]
):
return None
# Skip instrumentation if we're instrumenting the whole module and the function
# contains either @no_type_check or @typeguard_ignore
if self._target_path is None:
for decorator in node.decorator_list:
if self._memo.name_matches(decorator, *ignore_decorators):
return node
with self._use_memo(node):
arg_annotations: dict[str, Any] = {}
if self._target_path is None or self._memo.path == self._target_path:
# Find line number we're supposed to match against
if node.decorator_list:
first_lineno = node.decorator_list[0].lineno
else:
first_lineno = node.lineno
for decorator in node.decorator_list.copy():
if self._memo.name_matches(decorator, "typing.overload"):
# Remove overloads entirely
return None
elif self._memo.name_matches(decorator, "typeguard.typechecked"):
# Remove the decorator to prevent duplicate instrumentation
node.decorator_list.remove(decorator)
# Store any configuration overrides
if isinstance(decorator, Call) and decorator.keywords:
self._memo.configuration_overrides = {
kw.arg: kw.value for kw in decorator.keywords if kw.arg
}
if self.target_lineno == first_lineno:
assert self.target_node is None
self.target_node = node
if node.decorator_list:
self.target_lineno = node.decorator_list[0].lineno
else:
self.target_lineno = node.lineno
all_args = node.args.args + node.args.kwonlyargs + node.args.posonlyargs
# Ensure that any type shadowed by the positional or keyword-only
# argument names are ignored in this function
for arg in all_args:
self._memo.ignored_names.add(arg.arg)
# Ensure that any type shadowed by the variable positional argument name
# (e.g. "args" in *args) is ignored this function
if node.args.vararg:
self._memo.ignored_names.add(node.args.vararg.arg)
# Ensure that any type shadowed by the variable keywrod argument name
# (e.g. "kwargs" in *kwargs) is ignored this function
if node.args.kwarg:
self._memo.ignored_names.add(node.args.kwarg.arg)
for arg in all_args:
annotation = self._convert_annotation(deepcopy(arg.annotation))
if annotation:
arg_annotations[arg.arg] = annotation
if node.args.vararg:
annotation_ = self._convert_annotation(node.args.vararg.annotation)
if annotation_:
if sys.version_info >= (3, 9):
container = Name("tuple", ctx=Load())
else:
container = self._get_import("typing", "Tuple")
subscript_slice: Tuple | Index = Tuple(
[
annotation_,
Constant(Ellipsis),
],
ctx=Load(),
)
if sys.version_info < (3, 9):
subscript_slice = Index(subscript_slice, ctx=Load())
arg_annotations[node.args.vararg.arg] = Subscript(
container, subscript_slice, ctx=Load()
)
if node.args.kwarg:
annotation_ = self._convert_annotation(node.args.kwarg.annotation)
if annotation_:
if sys.version_info >= (3, 9):
container = Name("dict", ctx=Load())
else:
container = self._get_import("typing", "Dict")
subscript_slice = Tuple(
[
Name("str", ctx=Load()),
annotation_,
],
ctx=Load(),
)
if sys.version_info < (3, 9):
subscript_slice = Index(subscript_slice, ctx=Load())
arg_annotations[node.args.kwarg.arg] = Subscript(
container, subscript_slice, ctx=Load()
)
if arg_annotations:
self._memo.variable_annotations.update(arg_annotations)
self.generic_visit(node)
if arg_annotations:
annotations_dict = Dict(
keys=[Constant(key) for key in arg_annotations.keys()],
values=[
Tuple([Name(key, ctx=Load()), annotation], ctx=Load())
for key, annotation in arg_annotations.items()
],
)
func_name = self._get_import(
"typeguard._functions", "check_argument_types"
)
args = [
self._memo.joined_path,
annotations_dict,
self._memo.get_memo_name(),
]
node.body.insert(
self._memo.code_inject_index, Expr(Call(func_name, args, []))
)
# Add a checked "return None" to the end if there's no explicit return
# Skip if the return annotation is None or Any
if (
self._memo.return_annotation
and (not self._memo.is_async or not self._memo.has_yield_expressions)
and not isinstance(node.body[-1], Return)
and (
not isinstance(self._memo.return_annotation, Constant)
or self._memo.return_annotation.value is not None
)
):
func_name = self._get_import(
"typeguard._functions", "check_return_type"
)
return_node = Return(
Call(
func_name,
[
self._memo.joined_path,
Constant(None),
self._memo.return_annotation,
self._memo.get_memo_name(),
],
[],
)
)
# Replace a placeholder "pass" at the end
if isinstance(node.body[-1], Pass):
copy_location(return_node, node.body[-1])
del node.body[-1]
node.body.append(return_node)
# Insert code to create the call memo, if it was ever needed for this
# function
if self._memo.memo_var_name:
memo_kwargs: dict[str, Any] = {}
if self._memo.parent and isinstance(self._memo.parent.node, ClassDef):
for decorator in node.decorator_list:
if (
isinstance(decorator, Name)
and decorator.id == "staticmethod"
):
break
elif (
isinstance(decorator, Name)
and decorator.id == "classmethod"
):
memo_kwargs["self_type"] = Name(
id=node.args.args[0].arg, ctx=Load()
)
break
else:
if node.args.args:
if node.name == "__new__":
memo_kwargs["self_type"] = Name(
id=node.args.args[0].arg, ctx=Load()
)
else:
memo_kwargs["self_type"] = Attribute(
Name(id=node.args.args[0].arg, ctx=Load()),
"__class__",
ctx=Load(),
)
# Construct the function reference
# Nested functions get special treatment: the function name is added
# to free variables (and the closure of the resulting function)
names: list[str] = [node.name]
memo = self._memo.parent
while memo:
if isinstance(memo.node, (FunctionDef, AsyncFunctionDef)):
# This is a nested function. Use the function name as-is.
del names[:-1]
break
elif not isinstance(memo.node, ClassDef):
break
names.insert(0, memo.node.name)
memo = memo.parent
config_keywords = self._memo.get_config_keywords()
if config_keywords:
memo_kwargs["config"] = Call(
self._get_import("dataclasses", "replace"),
[self._get_import("typeguard._config", "global_config")],
config_keywords,
)
self._memo.memo_var_name.id = self._memo.get_unused_name("memo")
memo_store_name = Name(id=self._memo.memo_var_name.id, ctx=Store())
globals_call = Call(Name(id="globals", ctx=Load()), [], [])
locals_call = Call(Name(id="locals", ctx=Load()), [], [])
memo_expr = Call(
self._get_import("typeguard", "TypeCheckMemo"),
[globals_call, locals_call],
[keyword(key, value) for key, value in memo_kwargs.items()],
)
node.body.insert(
self._memo.code_inject_index,
Assign([memo_store_name], memo_expr),
)
self._memo.insert_imports(node)
# Special case the __new__() method to create a local alias from the
# class name to the first argument (usually "cls")
if (
isinstance(node, FunctionDef)
and node.args
and self._memo.parent is not None
and isinstance(self._memo.parent.node, ClassDef)
and node.name == "__new__"
):
first_args_expr = Name(node.args.args[0].arg, ctx=Load())
cls_name = Name(self._memo.parent.node.name, ctx=Store())
node.body.insert(
self._memo.code_inject_index,
Assign([cls_name], first_args_expr),
)
# Rmove any placeholder "pass" at the end
if isinstance(node.body[-1], Pass):
del node.body[-1]
return node
def visit_AsyncFunctionDef(
self, node: AsyncFunctionDef
) -> FunctionDef | AsyncFunctionDef | None:
return self.visit_FunctionDef(node)
def visit_Return(self, node: Return) -> Return:
"""This injects type checks into "return" statements."""
self.generic_visit(node)
if (
self._memo.return_annotation
and self._memo.should_instrument
and not self._memo.is_ignored_name(self._memo.return_annotation)
):
func_name = self._get_import("typeguard._functions", "check_return_type")
old_node = node
retval = old_node.value or Constant(None)
node = Return(
Call(
func_name,
[
self._memo.joined_path,
retval,
self._memo.return_annotation,
self._memo.get_memo_name(),
],
[],
)
)
copy_location(node, old_node)
return node
def visit_Yield(self, node: Yield) -> Yield | Call:
    """
    This injects type checks into "yield" expressions, checking both the yielded
    value and the value sent back to the generator, when appropriate.
    """
    self._memo.has_yield_expressions = True
    self.generic_visit(node)

    # First, wrap the value being yielded OUT in check_yield_type(...)
    if (
        self._memo.yield_annotation
        and self._memo.should_instrument
        and not self._memo.is_ignored_name(self._memo.yield_annotation)
    ):
        func_name = self._get_import("typeguard._functions", "check_yield_type")
        # A bare "yield" yields None
        yieldval = node.value or Constant(None)
        node.value = Call(
            func_name,
            [
                self._memo.joined_path,
                yieldval,
                self._memo.yield_annotation,
                self._memo.get_memo_name(),
            ],
            [],
        )

    # Then, wrap the whole yield expression in check_send_type(...) so the
    # value passed back in via send() is checked too. This must happen after
    # the yield-type wrapping above, since it wraps the (possibly modified)
    # yield node itself.
    if (
        self._memo.send_annotation
        and self._memo.should_instrument
        and not self._memo.is_ignored_name(self._memo.send_annotation)
    ):
        func_name = self._get_import("typeguard._functions", "check_send_type")
        old_node = node
        call_node = Call(
            func_name,
            [
                self._memo.joined_path,
                old_node,
                self._memo.send_annotation,
                self._memo.get_memo_name(),
            ],
            [],
        )
        copy_location(call_node, old_node)
        return call_node

    return node
def visit_AnnAssign(self, node: AnnAssign) -> Any:
    """
    Instrument an annotated assignment (``x: T = value``) in a function body
    with a type check on the assigned value.
    """
    self.generic_visit(node)

    in_function = isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef))
    if in_function and node.annotation and isinstance(node.target, Name):
        # The target is a local variable, not a type reference
        self._memo.ignored_names.add(node.target.id)
        converted = self._convert_annotation(deepcopy(node.annotation))
        if converted:
            # Remember the annotation so later plain assignments to this
            # name can be checked as well
            self._memo.variable_annotations[node.target.id] = converted
            if node.value:
                check_func = self._get_import(
                    "typeguard._functions", "check_variable_assignment"
                )
                node.value = Call(
                    check_func,
                    [
                        node.value,
                        Constant(node.target.id),
                        converted,
                        self._memo.get_memo_name(),
                    ],
                    [],
                )

    return node
def visit_Assign(self, node: Assign) -> Any:
    """
    This injects a type check into a local variable assignment within a function
    body. The variable must have been annotated earlier in the function body.
    """
    self.generic_visit(node)

    # Only instrument function-local assignments
    if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)):
        # One dict per assignment target; each maps a Constant holding the
        # (possibly "*"-prefixed) variable name to its annotation, or None
        # when the name has no recorded annotation
        targets: list[dict[Constant, expr | None]] = []
        check_required = False
        for target in node.targets:
            elts: Sequence[expr]
            if isinstance(target, Name):
                elts = [target]
            elif isinstance(target, Tuple):
                # Tuple-unpacking assignment: handle each element
                elts = target.elts
            else:
                # Attribute/subscript targets are not instrumented
                continue

            annotations_: dict[Constant, expr | None] = {}
            for exp in elts:
                prefix = ""
                if isinstance(exp, Starred):
                    # "*rest" target: record the inner name with a "*" prefix
                    exp = exp.value
                    prefix = "*"

                if isinstance(exp, Name):
                    self._memo.ignored_names.add(exp.id)
                    name = prefix + exp.id
                    annotation = self._memo.variable_annotations.get(exp.id)
                    if annotation:
                        annotations_[Constant(name)] = annotation
                        check_required = True
                    else:
                        annotations_[Constant(name)] = None

            targets.append(annotations_)

        # Only rewrite the assignment if at least one target is annotated
        if check_required:
            # Replace missing annotations with typing.Any
            for item in targets:
                for key, expression in item.items():
                    if expression is None:
                        item[key] = self._get_import("typing", "Any")

            if len(targets) == 1 and len(targets[0]) == 1:
                # Simple single-target assignment: check just that one value
                func_name = self._get_import(
                    "typeguard._functions", "check_variable_assignment"
                )
                target_varname = next(iter(targets[0]))
                node.value = Call(
                    func_name,
                    [
                        node.value,
                        target_varname,
                        targets[0][target_varname],
                        self._memo.get_memo_name(),
                    ],
                    [],
                )
            elif targets:
                # Multiple targets and/or tuple unpacking: pass the full
                # name-to-annotation mapping as a list of dicts
                func_name = self._get_import(
                    "typeguard._functions", "check_multi_variable_assignment"
                )
                targets_arg = List(
                    [
                        Dict(keys=list(target), values=list(target.values()))
                        for target in targets
                    ],
                    ctx=Load(),
                )
                node.value = Call(
                    func_name,
                    [node.value, targets_arg, self._memo.get_memo_name()],
                    [],
                )

    return node
def visit_NamedExpr(self, node: NamedExpr) -> Any:
    """Instrument an assignment expression (``name := value``) with a type check."""
    self.generic_visit(node)

    # Only instrument function-local assignments to plain names
    if not isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)):
        return node
    if not isinstance(node.target, Name):
        return node

    self._memo.ignored_names.add(node.target.id)

    # Bail out if no matching annotation is found
    annotation = self._memo.variable_annotations.get(node.target.id)
    if annotation is None:
        return node

    check_func = self._get_import(
        "typeguard._functions", "check_variable_assignment"
    )
    node.value = Call(
        check_func,
        [
            node.value,
            Constant(node.target.id),
            annotation,
            self._memo.get_memo_name(),
        ],
        [],
    )
    return node
def visit_AugAssign(self, node: AugAssign) -> Any:
    """
    Instrument an augmented assignment (``x += 1``) with a type check on the
    resulting value.
    """
    self.generic_visit(node)

    # Only instrument function-local assignments to plain names
    if not (
        isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef))
        and isinstance(node.target, Name)
    ):
        return node

    # Bail out if no matching annotation is found
    annotation = self._memo.variable_annotations.get(node.target.id)
    if annotation is None:
        return node

    # Bail out if the operator is not found (newer Python version?)
    operator_func_name = aug_assign_functions.get(node.op.__class__)
    if operator_func_name is None:
        return node

    # Rewrite "x <op>= y" as
    # "x = check_variable_assignment(operator.<func>(x, y), ...)"
    operator_func = self._get_import("operator", operator_func_name)
    operator_call = Call(
        operator_func, [Name(node.target.id, ctx=Load()), node.value], []
    )
    check_call = Call(
        self._get_import("typeguard._functions", "check_variable_assignment"),
        [
            operator_call,
            Constant(node.target.id),
            annotation,
            self._memo.get_memo_name(),
        ],
        [],
    )
    return Assign(targets=[node.target], value=check_call)
def visit_If(self, node: If) -> Any:
    """
    Keep names defined inside a module-level ``if typing.TYPE_CHECKING:`` block
    out of name collection, so they won't be type checked.
    """
    self.generic_visit(node)

    at_module_level = self._memo is self._module_memo
    if (
        at_module_level
        and isinstance(node.test, Name)
        and self._memo.name_matches(node.test, "typing.TYPE_CHECKING")
    ):
        # Mark every name bound inside this block as ignored
        collector = NameCollector()
        collector.visit(node)
        self._memo.ignored_names.update(collector.names)

    return node
|
TypeguardTransformer
|
python
|
python__mypy
|
mypy/semanal_shared.py
|
{
"start": 3855,
"end": 9621
}
|
class ____(SemanticAnalyzerCoreInterface):
    """A limited abstract interface to some generic semantic analyzer pass 2 functionality.

    We use this interface for various reasons:

    * Looser coupling
    * Cleaner import graph
    * Less need to pass around callback functions
    """

    # Scope used to resolve type variables bound in the current context
    tvar_scope: TypeVarLikeScope

    @abstractmethod
    def lookup(
        self, name: str, ctx: Context, suppress_errors: bool = False
    ) -> SymbolTableNode | None:
        """Look up *name* in the current scope; return None if it isn't found."""
        raise NotImplementedError

    @abstractmethod
    def named_type(self, fullname: str, args: list[Type] | None = None) -> Instance:
        """Construct an Instance for the given fully qualified type name."""
        raise NotImplementedError

    @abstractmethod
    def named_type_or_none(self, fullname: str, args: list[Type] | None = None) -> Instance | None:
        """Like named_type(), but may return None instead of an Instance."""
        raise NotImplementedError

    @abstractmethod
    def accept(self, node: Node) -> None:
        """Semantically analyze the given node."""
        raise NotImplementedError

    @abstractmethod
    def anal_type(
        self,
        typ: Type,
        /,
        *,
        tvar_scope: TypeVarLikeScope | None = None,
        allow_tuple_literal: bool = False,
        allow_unbound_tvars: bool = False,
        allow_typed_dict_special_forms: bool = False,
        allow_placeholder: bool = False,
        report_invalid_types: bool = True,
        prohibit_self_type: str | None = None,
        prohibit_special_class_field_types: str | None = None,
    ) -> Type | None:
        """Analyze a type; the keyword flags control what forms are permitted."""
        raise NotImplementedError

    @abstractmethod
    def get_and_bind_all_tvars(self, type_exprs: list[Expression]) -> list[TypeVarLikeType]:
        """Find and bind all type variables appearing in the given type expressions."""
        raise NotImplementedError

    @abstractmethod
    def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance, line: int) -> TypeInfo:
        """Create a new TypeInfo with the given name and fallback base type."""
        raise NotImplementedError

    @abstractmethod
    def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
        """Schedule *patch* to be executed later, ordered by *priority*."""
        raise NotImplementedError

    @abstractmethod
    def add_symbol_table_node(self, name: str, symbol: SymbolTableNode) -> bool:
        """Add node to the current symbol table."""
        raise NotImplementedError

    @abstractmethod
    def current_symbol_table(self) -> SymbolTable:
        """Get currently active symbol table.

        May be module, class, or local namespace.
        """
        raise NotImplementedError

    @abstractmethod
    def add_symbol(
        self,
        name: str,
        node: SymbolNode,
        context: Context,
        module_public: bool = True,
        module_hidden: bool = False,
        can_defer: bool = True,
    ) -> bool:
        """Add symbol to the current symbol table."""
        raise NotImplementedError

    @abstractmethod
    def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
        """Add symbol to the current symbol table, skipping locals.

        This is used to store symbol nodes in a symbol table that
        is going to be serialized (local namespaces are not serialized).
        See implementation docstring for more details.
        """
        raise NotImplementedError

    @abstractmethod
    def parse_bool(self, expr: Expression) -> bool | None:
        """Extract a boolean value from an expression; None if it isn't one."""
        raise NotImplementedError

    @abstractmethod
    def qualified_name(self, name: str) -> str:
        """Return the qualified form of *name*."""
        raise NotImplementedError

    @property
    @abstractmethod
    def is_typeshed_stub_file(self) -> bool:
        # True when the file under analysis is a typeshed stub
        raise NotImplementedError

    @abstractmethod
    def process_placeholder(
        self, name: str | None, kind: str, ctx: Context, force_progress: bool = False
    ) -> None:
        """Handle a placeholder symbol encountered during analysis."""
        raise NotImplementedError
def set_callable_name(sig: Type, fdef: FuncDef) -> ProperType:
    """Return *sig* with a human-readable name derived from *fdef* attached.

    Non-callable types are returned unchanged (as proper types).
    """
    proper_sig = get_proper_type(sig)
    if not isinstance(proper_sig, FunctionLike):
        return proper_sig

    if not fdef.info:
        # Plain function: just use its own name
        return proper_sig.with_name(fdef.name)

    if fdef.info.fullname in TPDICT_FB_NAMES:
        # Avoid exposing the internal _TypedDict name.
        class_name = "TypedDict"
    else:
        class_name = fdef.info.name
    return proper_sig.with_name(f"{fdef.name} of {class_name}")
def calculate_tuple_fallback(typ: TupleType) -> None:
    """Calculate a precise item type for the fallback of a tuple type.

    This must be called only after the main semantic analysis pass, since joins
    aren't available before that.

    Note that there is an apparent chicken and egg problem with respect
    to verifying type arguments against bounds. Verifying bounds might
    require fallbacks, but we might use the bounds to calculate the
    fallbacks. In practice this is not a problem, since the worst that
    can happen is that we have invalid type argument values, and these
    can happen in later stages as well (they will generate errors, but
    we don't prevent their existence).
    """
    fallback = typ.partial_fallback
    assert fallback.type.fullname == "builtins.tuple"
    items = []
    for item in flatten_nested_tuples(typ.items):
        # TODO: this duplicates some logic in typeops.tuple_fallback().
        if isinstance(item, UnpackType):
            # Unpacked items contribute their element type, not themselves
            unpacked_type = get_proper_type(item.type)
            if isinstance(unpacked_type, TypeVarTupleType):
                unpacked_type = get_proper_type(unpacked_type.upper_bound)
            if (
                isinstance(unpacked_type, Instance)
                and unpacked_type.type.fullname == "builtins.tuple"
            ):
                items.append(unpacked_type.args[0])
            else:
                # This is called before semanal_typeargs.py fixes broken unpacks,
                # where the error should also be generated.
                items.append(AnyType(TypeOfAny.from_error))
        else:
            items.append(item)
    # The fallback tuple's single type argument is the union of all item types
    fallback.args = (make_simplified_union(items),)
|
SemanticAnalyzerInterface
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_numeric.py
|
{
"start": 79787,
"end": 86548
}
|
class ____(TestCase):
    """Test ones_like, zeros_like, empty_like and full_like"""

    def setUp(self):
        """Build a matrix of (array, override dtype) pairs covering many layouts."""
        super().setUp()
        # Each entry is (input array, dtype to request or None for "inherit")
        self.data = [
            # Array scalars
            (np.array(3.0), None),
            (np.array(3), "f8"),
            # 1D arrays
            (np.arange(6, dtype="f4"), None),
            (np.arange(6), "c16"),
            # 2D C-layout arrays
            (np.arange(6).reshape(2, 3), None),
            (np.arange(6).reshape(3, 2), "i1"),
            # 2D F-layout arrays
            (np.arange(6).reshape((2, 3), order="F"), None),
            (np.arange(6).reshape((3, 2), order="F"), "i1"),
            # 3D C-layout arrays
            (np.arange(24).reshape(2, 3, 4), None),
            (np.arange(24).reshape(4, 3, 2), "f4"),
            # 3D F-layout arrays
            (np.arange(24).reshape((2, 3, 4), order="F"), None),
            (np.arange(24).reshape((4, 3, 2), order="F"), "f4"),
            # 3D non-C/F-layout arrays
            (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
            (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), "?"),
        ]
        # Explicit shapes to pass via the "shape" parameter
        self.shapes = [
            (),
            (5,),
            (
                5,
                6,
            ),
            (
                5,
                6,
                7,
            ),
        ]

    def compare_array_value(self, dz, value, fill_value):
        """Assert every element of dz equals value (cast to dz's dtype for full_like)."""
        if value is not None:
            if fill_value:
                # Conversion is close to what np.full_like uses
                # but we may want to convert directly in the future
                # which may result in errors (where this does not).
                z = np.array(value).astype(dz.dtype)
                assert_(np.all(dz == z))
            else:
                assert_(np.all(dz == value))

    def check_like_function(self, like_function, value, fill_value=False):
        """Run like_function over all fixtures, checking shape/dtype/layout/value.

        value is the expected fill content (None for empty_like); fill_value=True
        means like_function takes a fill_value kwarg (i.e. np.full_like).
        """
        if fill_value:
            fill_kwarg = {"fill_value": value}
        else:
            fill_kwarg = {}

        for d, dtype in self.data:
            # default (K) order, dtype
            dz = like_function(d, dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            # Compare strides scaled by itemsize, so a dtype change with the
            # same layout still matches
            assert_equal(
                np.array(dz.strides) * d.dtype.itemsize,
                np.array(d.strides) * dz.dtype.itemsize,
            )
            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # C order, default dtype
            dz = like_function(d, order="C", dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # F order, default dtype
            dz = like_function(d, order="F", dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # A order: follows F only when the input is F-contiguous
            dz = like_function(d, order="A", dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            if d.flags.f_contiguous:
                assert_(dz.flags.f_contiguous)
            else:
                assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # Test the 'shape' parameter
            for s in self.shapes:
                for o in "CFA":
                    sz = like_function(d, dtype=dtype, shape=s, order=o, **fill_kwarg)
                    assert_equal(sz.shape, s)
                    if dtype is None:
                        assert_equal(sz.dtype, d.dtype)
                    else:
                        assert_equal(sz.dtype, np.dtype(dtype))
                    if o == "C" or (o == "A" and d.flags.c_contiguous):
                        assert_(sz.flags.c_contiguous)
                    elif o == "F" or (o == "A" and d.flags.f_contiguous):
                        assert_(sz.flags.f_contiguous)
                    self.compare_array_value(sz, value, fill_value)

                # With order="K" and a different ndim, layout falls back to C;
                # with matching ndim the relative stride ordering is preserved
                if d.ndim != len(s):
                    assert_equal(
                        np.argsort(
                            like_function(
                                d, dtype=dtype, shape=s, order="K", **fill_kwarg
                            ).strides
                        ),
                        np.argsort(np.empty(s, dtype=dtype, order="C").strides),
                    )
                else:
                    assert_equal(
                        np.argsort(
                            like_function(
                                d, dtype=dtype, shape=s, order="K", **fill_kwarg
                            ).strides
                        ),
                        np.argsort(d.strides),
                    )

    def test_ones_like(self):
        self.check_like_function(np.ones_like, 1)

    def test_zeros_like(self):
        self.check_like_function(np.zeros_like, 0)

    def test_empty_like(self):
        # value=None: contents are uninitialized, only metadata is checked
        self.check_like_function(np.empty_like, None)

    def test_filled_like(self):
        self.check_like_function(np.full_like, 0, True)
        self.check_like_function(np.full_like, 1, True)
        self.check_like_function(np.full_like, 1000, True)
        self.check_like_function(np.full_like, 123.456, True)
        # Inf to integer casts cause invalid-value errors: ignore them.
        self.check_like_function(np.full_like, np.inf, True)

    @parametrize("likefunc", [np.empty_like, np.full_like, np.zeros_like, np.ones_like])
    @parametrize("dtype", [str, bytes])
    def test_dtype_str_bytes(self, likefunc, dtype):
        # Regression test for gh-19860
        a = np.arange(16).reshape(2, 8)
        b = a[:, ::2]  # Ensure b is not contiguous.
        kwargs = {"fill_value": ""} if likefunc is np.full_like else {}
        result = likefunc(b, dtype=dtype, **kwargs)
        # str is 4 bytes per char (UCS-4), bytes is 1 byte per char
        if dtype is str:
            assert result.strides == (16, 4)
        else:
            # dtype is bytes
            assert result.strides == (4, 1)
|
TestLikeFuncs
|
python
|
docker__docker-py
|
tests/unit/api_container_test.py
|
{
"start": 466,
"end": 4354
}
|
class ____(BaseAPIClientTest):
    """Tests for APIClient.start().

    Host-config style keyword arguments (lxc_conf, binds, port_bindings, links,
    privileged) were moved to create-time host_config; passing them to start()
    must raise docker.errors.DeprecatedMethod.
    """

    def test_start_container(self):
        # Starting by ID issues POST /containers/<id>/start with no body
        self.client.start(fake_api.FAKE_CONTAINER_ID)

        args = fake_request.call_args
        assert args[0][1] == (url_prefix + 'containers/' +
                             fake_api.FAKE_CONTAINER_ID + '/start')
        assert 'data' not in args[1]
        assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS

    def test_start_container_none(self):
        # A missing container ID is rejected whether passed by keyword or
        # positionally
        with pytest.raises(ValueError) as excinfo:
            self.client.start(container=None)

        assert str(excinfo.value) == 'Resource ID was not provided'

        with pytest.raises(ValueError) as excinfo:
            self.client.start(None)

        assert str(excinfo.value) == 'Resource ID was not provided'

    def test_start_container_regression_573(self):
        # Regression: start() must accept "container" passed via **kwargs
        self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})

    def test_start_container_with_lxc_conf(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(
                fake_api.FAKE_CONTAINER_ID,
                lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
            )

    def test_start_container_with_lxc_conf_compat(self):
        # Same as above, but with the legacy list-of-dicts format
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(
                fake_api.FAKE_CONTAINER_ID,
                lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
            )

    def test_start_container_with_binds_ro(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(
                fake_api.FAKE_CONTAINER_ID, binds={
                    '/tmp': {
                        "bind": '/mnt',
                        "ro": True
                    }
                }
            )

    def test_start_container_with_binds_rw(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(
                fake_api.FAKE_CONTAINER_ID, binds={
                    '/tmp': {"bind": '/mnt', "ro": False}
                }
            )

    def test_start_container_with_port_binds(self):
        self.maxDiff = None
        # Every supported port-binding spelling is rejected at start() time
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
                1111: None,
                2222: 2222,
                '3333/udp': (3333,),
                4444: ('127.0.0.1',),
                5555: ('127.0.0.1', 5555),
                6666: [('127.0.0.1',), ('192.168.0.1',)]
            })

    def test_start_container_with_links(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(
                fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
            )

    def test_start_container_with_multiple_links(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(
                fake_api.FAKE_CONTAINER_ID,
                links={
                    'path1': 'alias1',
                    'path2': 'alias2'
                }
            )

    def test_start_container_with_links_as_list_of_tuples(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(fake_api.FAKE_CONTAINER_ID,
                              links=[('path', 'alias')])

    def test_start_container_privileged(self):
        with pytest.raises(docker.errors.DeprecatedMethod):
            self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)

    def test_start_container_with_dict_instead_of_id(self):
        # A dict with an "Id" key is accepted in place of a plain ID string
        self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})

        args = fake_request.call_args
        assert args[0][1] == (url_prefix + 'containers/' +
                             fake_api.FAKE_CONTAINER_ID + '/start')
        assert 'data' not in args[1]
        assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
|
StartContainerTest
|
python
|
pytorch__pytorch
|
torch/_dynamo/utils.py
|
{
"start": 140019,
"end": 140676
}
|
class ____:
"""Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor."""
def __init__(self, method: str) -> None:
self.method = method
self.__name__ = "wrapped_" + self.method
def __repr__(self) -> str:
return f"<Wrapped method <original {self.method}>>"
def __call__(self, *args: Any, **kwargs: Any) -> Any:
obj = args[0]
if isinstance(obj, torch.Tensor):
obj = tnp.ndarray(obj)
method_callable = getattr(obj, self.method)
out = method_callable(*args[1:], **kwargs)
return numpy_to_tensor(out)
|
numpy_method_wrapper
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/chains/base.py
|
{
"start": 1115,
"end": 31346
}
|
class ____(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
"""Abstract base class for creating structured sequences of calls to components.
Chains should be used to encode a sequence of calls to components like
models, document retrievers, other chains, etc., and provide a simple interface
to this sequence.
The Chain interface makes it easy to create apps that are:
- Stateful: add Memory to any Chain to give it state,
- Observable: pass Callbacks to a Chain to execute additional functionality,
like logging, outside the main sequence of component calls,
- Composable: the Chain API is flexible enough that it is easy to combine
Chains with other components, including other Chains.
The main methods exposed by chains are:
- `__call__`: Chains are callable. The `__call__` method is the primary way to
execute a Chain. This takes inputs as a dictionary and returns a
dictionary output.
- `run`: A convenience method that takes inputs as args/kwargs and returns the
output as a string or object. This method can only be used for a subset of
chains and cannot return as rich of an output as `__call__`.
"""
memory: BaseMemory | None = None
"""Optional memory object.
Memory is a class that gets called at the start
and at the end of every chain. At the start, memory loads variables and passes
them along in the chain. At the end, it saves any returned variables.
There are many different types of memory - please see memory docs
for the full catalog."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Optional list of callback handlers (or callback manager).
Callback handlers are called throughout the lifecycle of a call to a chain,
starting with on_chain_start, ending with on_chain_end or on_chain_error.
Each custom chain can optionally call additional callback methods, see Callback docs
for full details."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether or not run in verbose mode. In verbose mode, some intermediate logs
will be printed to the console. Defaults to the global `verbose` value,
accessible via `langchain.globals.get_verbose()`."""
tags: list[str] | None = None
"""Optional list of tags associated with the chain.
These tags will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a chain with its use case.
"""
metadata: builtins.dict[str, Any] | None = None
"""Optional metadata associated with the chain.
This metadata will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a chain with its use case.
"""
callback_manager: BaseCallbackManager | None = Field(default=None, exclude=True)
"""[DEPRECATED] Use `callbacks` instead."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@override
def get_input_schema(
    self,
    config: RunnableConfig | None = None,
) -> type[BaseModel]:
    """Build a pydantic model describing the chain's expected input keys."""
    # Every input key is typed as Any and optional.
    # This is correct, but pydantic typings/mypy don't think so.
    fields = dict.fromkeys(self.input_keys, (Any, None))
    return create_model("ChainInput", **fields)
@override
def get_output_schema(
    self,
    config: RunnableConfig | None = None,
) -> type[BaseModel]:
    """Build a pydantic model describing the chain's output keys."""
    # Every output key is typed as Any and optional.
    # This is correct, but pydantic typings/mypy don't think so.
    fields = dict.fromkeys(self.output_keys, (Any, None))
    return create_model("ChainOutput", **fields)
@override
def invoke(
    self,
    input: dict[str, Any],
    config: RunnableConfig | None = None,
    **kwargs: Any,
) -> dict[str, Any]:
    """Execute the chain synchronously, with callback lifecycle management.

    Supported kwargs: include_run_info (attach RunInfo to the result) and
    return_only_outputs (omit the inputs from the result).
    """
    config = ensure_config(config)
    callbacks = config.get("callbacks")
    tags = config.get("tags")
    metadata = config.get("metadata")
    run_name = config.get("run_name") or self.get_name()
    run_id = config.get("run_id")
    include_run_info = kwargs.get("include_run_info", False)
    return_only_outputs = kwargs.get("return_only_outputs", False)

    # Merge memory-provided variables into the raw inputs
    inputs = self.prep_inputs(input)
    callback_manager = CallbackManager.configure(
        callbacks,
        self.callbacks,
        self.verbose,
        tags,
        self.tags,
        metadata,
        self.metadata,
    )
    # Older _call() implementations may not accept run_manager; only pass it
    # if the signature declares the parameter
    new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
    run_manager = callback_manager.on_chain_start(
        None,
        inputs,
        run_id,
        name=run_name,
    )
    try:
        self._validate_inputs(inputs)
        outputs = (
            self._call(inputs, run_manager=run_manager)
            if new_arg_supported
            else self._call(inputs)
        )

        final_outputs: dict[str, Any] = self.prep_outputs(
            inputs,
            outputs,
            return_only_outputs,
        )
    except BaseException as e:
        # Report the failure to callbacks before propagating
        run_manager.on_chain_error(e)
        raise
    run_manager.on_chain_end(outputs)

    if include_run_info:
        final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
    return final_outputs
@override
async def ainvoke(
    self,
    input: dict[str, Any],
    config: RunnableConfig | None = None,
    **kwargs: Any,
) -> dict[str, Any]:
    """Async counterpart of invoke(); same flow with async callbacks/memory.

    Supported kwargs: include_run_info (attach RunInfo to the result) and
    return_only_outputs (omit the inputs from the result).
    """
    config = ensure_config(config)
    callbacks = config.get("callbacks")
    tags = config.get("tags")
    metadata = config.get("metadata")
    run_name = config.get("run_name") or self.get_name()
    run_id = config.get("run_id")
    include_run_info = kwargs.get("include_run_info", False)
    return_only_outputs = kwargs.get("return_only_outputs", False)

    # Merge memory-provided variables into the raw inputs (async memory)
    inputs = await self.aprep_inputs(input)
    callback_manager = AsyncCallbackManager.configure(
        callbacks,
        self.callbacks,
        self.verbose,
        tags,
        self.tags,
        metadata,
        self.metadata,
    )
    # Older _acall() implementations may not accept run_manager; only pass it
    # if the signature declares the parameter
    new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
    run_manager = await callback_manager.on_chain_start(
        None,
        inputs,
        run_id,
        name=run_name,
    )
    try:
        self._validate_inputs(inputs)
        outputs = (
            await self._acall(inputs, run_manager=run_manager)
            if new_arg_supported
            else await self._acall(inputs)
        )
        final_outputs: dict[str, Any] = await self.aprep_outputs(
            inputs,
            outputs,
            return_only_outputs,
        )
    except BaseException as e:
        # Report the failure to callbacks before propagating
        await run_manager.on_chain_error(e)
        raise
    await run_manager.on_chain_end(outputs)

    if include_run_info:
        final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
    return final_outputs
@property
def _chain_type(self) -> str:
    """Identifier used when serializing this chain; subclasses must override."""
    raise NotImplementedError("Saving not supported for this chain type.")
@model_validator(mode="before")
@classmethod
def raise_callback_manager_deprecation(cls, values: dict) -> Any:
    """Reject or migrate the deprecated callback_manager field.

    Passing both callback_manager and callbacks is an error; passing only
    callback_manager warns and moves the value over to callbacks.
    """
    if values.get("callback_manager") is None:
        return values

    if values.get("callbacks") is not None:
        msg = (
            "Cannot specify both callback_manager and callbacks. "
            "callback_manager is deprecated, callbacks is the preferred "
            "parameter to pass in."
        )
        raise ValueError(msg)

    warnings.warn(
        "callback_manager is deprecated. Please use callbacks instead.",
        DeprecationWarning,
        stacklevel=4,
    )
    # Migrate the legacy value onto the supported field
    values["callbacks"] = values.pop("callback_manager", None)
    return values
@field_validator("verbose", mode="before")
@classmethod
def set_verbose(
    cls,
    verbose: bool | None,  # noqa: FBT001
) -> bool:
    """Resolve the chain verbosity, falling back to the global setting."""
    return _get_verbosity() if verbose is None else verbose
# Abstract: concrete chains declare which input keys they consume.
@property
@abstractmethod
def input_keys(self) -> list[str]:
    """Keys expected to be in the chain input."""
# Abstract: concrete chains declare which output keys they produce.
@property
@abstractmethod
def output_keys(self) -> list[str]:
    """Keys expected to be in the chain output."""
def _validate_inputs(self, inputs: Any) -> None:
    """Check that all inputs are present.

    A non-dict input is only acceptable when exactly one input key remains
    after subtracting memory-provided variables; otherwise ValueError is
    raised. Dict inputs must contain every key in input_keys.
    """
    if not isinstance(inputs, dict):
        _input_keys = set(self.input_keys)
        if self.memory is not None:
            # If there are multiple input keys, but some get set by memory so that
            # only one is not set, we can still figure out which key it is.
            _input_keys = _input_keys.difference(self.memory.memory_variables)
        if len(_input_keys) != 1:
            msg = (
                f"A single string input was passed in, but this chain expects "
                f"multiple inputs ({_input_keys}). When a chain expects "
                f"multiple inputs, please call it by passing in a dictionary, "
                "eg `chain({'foo': 1, 'bar': 2})`"
            )
            raise ValueError(msg)

    missing_keys = set(self.input_keys).difference(inputs)
    if missing_keys:
        msg = f"Missing some input keys: {missing_keys}"
        raise ValueError(msg)
def _validate_outputs(self, outputs: dict[str, Any]) -> None:
    """Raise ValueError if any key in output_keys is absent from outputs."""
    missing = set(self.output_keys).difference(outputs)
    if missing:
        raise ValueError(f"Missing some output keys: {missing}")
@abstractmethod
def _call(
    self,
    inputs: builtins.dict[str, Any],
    run_manager: CallbackManagerForChainRun | None = None,
) -> builtins.dict[str, Any]:
    """Execute the chain.

    This is a private method that is not user-facing. It is only called within
    `Chain.__call__`, which is the user-facing wrapper method that handles
    callbacks configuration and some input/output processing.

    Args:
        inputs: A dict of named inputs to the chain. Assumed to contain all inputs
            specified in `Chain.input_keys`, including any inputs added by memory.
        run_manager: The callbacks manager that contains the callback handlers for
            this run of the chain.

    Returns:
        A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
    """
    # Abstract: concrete subclasses implement the chain's core logic here.
async def _acall(
    self,
    inputs: builtins.dict[str, Any],
    run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> builtins.dict[str, Any]:
    """Asynchronously execute the chain.

    This is a private method that is not user-facing. It is only called within
    `Chain.acall`, which is the user-facing wrapper method that handles
    callbacks configuration and some input/output processing.

    Args:
        inputs: A dict of named inputs to the chain. Assumed to contain all inputs
            specified in `Chain.input_keys`, including any inputs added by memory.
        run_manager: The callbacks manager that contains the callback handlers for
            this run of the chain.

    Returns:
        A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
    """
    # Default implementation: run the synchronous _call() in an executor,
    # converting the async run manager to its sync equivalent
    return await run_in_executor(
        None,
        self._call,
        inputs,
        run_manager.get_sync() if run_manager else None,
    )
@deprecated("0.1.0", alternative="invoke", removal="1.0")
def __call__(
    self,
    inputs: dict[str, Any] | Any,
    return_only_outputs: bool = False,  # noqa: FBT001,FBT002
    callbacks: Callbacks = None,
    *,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    run_name: str | None = None,
    include_run_info: bool = False,
) -> dict[str, Any]:
    """Execute the chain.

    Args:
        inputs: Dictionary of inputs, or single input if chain expects
            only one param. Should contain all inputs specified in
            `Chain.input_keys` except for inputs that will be set by the chain's
            memory.
        return_only_outputs: Whether to return only outputs in the
            response. If `True`, only new keys generated by this chain will be
            returned. If `False`, both input keys and new keys generated by this
            chain will be returned.
        callbacks: Callbacks to use for this chain run. These will be called in
            addition to callbacks passed to the chain during construction, but only
            these runtime callbacks will propagate to calls to other objects.
        tags: List of string tags to pass to all callbacks. These will be passed in
            addition to tags passed to the chain during construction, but only
            these runtime tags will propagate to calls to other objects.
        metadata: Optional metadata associated with the chain.
        run_name: Optional name for this run of the chain.
        include_run_info: Whether to include run info in the response. Defaults
            to False.

    Returns:
        A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
    """
    config = {
        "callbacks": callbacks,
        "tags": tags,
        "metadata": metadata,
        "run_name": run_name,
    }

    # Drop unset (None) entries so invoke() falls back to its own defaults
    return self.invoke(
        inputs,
        cast("RunnableConfig", {k: v for k, v in config.items() if v is not None}),
        return_only_outputs=return_only_outputs,
        include_run_info=include_run_info,
    )
@deprecated("0.1.0", alternative="ainvoke", removal="1.0")
async def acall(
    self,
    inputs: dict[str, Any] | Any,
    return_only_outputs: bool = False,  # noqa: FBT001,FBT002
    callbacks: Callbacks = None,
    *,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    run_name: str | None = None,
    include_run_info: bool = False,
) -> dict[str, Any]:
    """Asynchronously execute the chain.

    Args:
        inputs: Dictionary of inputs, or single input if chain expects
            only one param. Should contain all inputs specified in
            `Chain.input_keys` except for inputs that will be set by the chain's
            memory.
        return_only_outputs: Whether to return only outputs in the
            response. If `True`, only new keys generated by this chain will be
            returned. If `False`, both input keys and new keys generated by this
            chain will be returned.
        callbacks: Callbacks to use for this chain run. These will be called in
            addition to callbacks passed to the chain during construction, but only
            these runtime callbacks will propagate to calls to other objects.
        tags: List of string tags to pass to all callbacks. These will be passed in
            addition to tags passed to the chain during construction, but only
            these runtime tags will propagate to calls to other objects.
        metadata: Optional metadata associated with the chain.
        run_name: Optional name for this run of the chain.
        include_run_info: Whether to include run info in the response. Defaults
            to False.

    Returns:
        A dict of named outputs. Should contain all outputs specified in
            `Chain.output_keys`.
    """
    config = {
        "callbacks": callbacks,
        "tags": tags,
        "metadata": metadata,
        "run_name": run_name,
    }

    # Drop unset (None) VALUES so ainvoke() falls back to its own defaults.
    # Bug fix: this previously filtered on "k is not None", which is always
    # true for the literal keys above and so leaked None values into the
    # config (the sync __call__ correctly filters on the value).
    return await self.ainvoke(
        inputs,
        cast("RunnableConfig", {k: v for k, v in config.items() if v is not None}),
        return_only_outputs=return_only_outputs,
        include_run_info=include_run_info,
    )
def prep_outputs(
    self,
    inputs: dict[str, str],
    outputs: dict[str, str],
    return_only_outputs: bool = False,  # noqa: FBT001,FBT002
) -> dict[str, str]:
    """Validate chain outputs and record this run in memory, if configured.

    Args:
        inputs: Dictionary of chain inputs, including any inputs added by chain
            memory.
        outputs: Dictionary of initial chain outputs.
        return_only_outputs: Whether to only return the chain outputs. If `False`,
            inputs are also added to the final outputs.

    Returns:
        A dict of the final chain outputs.
    """
    self._validate_outputs(outputs)
    if self.memory is not None:
        # Persist this turn so future runs can load it back as context.
        self.memory.save_context(inputs, outputs)
    return outputs if return_only_outputs else {**inputs, **outputs}
async def aprep_outputs(
    self,
    inputs: dict[str, str],
    outputs: dict[str, str],
    return_only_outputs: bool = False,  # noqa: FBT001,FBT002
) -> dict[str, str]:
    """Validate chain outputs and record this run in memory, if configured.

    Async counterpart of `prep_outputs`.

    Args:
        inputs: Dictionary of chain inputs, including any inputs added by chain
            memory.
        outputs: Dictionary of initial chain outputs.
        return_only_outputs: Whether to only return the chain outputs. If `False`,
            inputs are also added to the final outputs.

    Returns:
        A dict of the final chain outputs.
    """
    self._validate_outputs(outputs)
    if self.memory is not None:
        # Persist this turn so future runs can load it back as context.
        await self.memory.asave_context(inputs, outputs)
    return outputs if return_only_outputs else {**inputs, **outputs}
def prep_inputs(self, inputs: dict[str, Any] | Any) -> dict[str, str]:
    """Prepare chain inputs, including adding inputs from memory.

    Args:
        inputs: Dictionary of raw inputs, or single input if chain expects
            only one param. Should contain all inputs specified in
            `Chain.input_keys` except for inputs that will be set by the chain's
            memory.

    Returns:
        A dictionary of all inputs, including those added by the chain's memory.
    """
    if not isinstance(inputs, dict):
        candidate_keys = set(self.input_keys)
        if self.memory is not None:
            # Memory may supply some declared inputs; whatever remains must be
            # the single key the caller's bare value maps to.
            candidate_keys = candidate_keys.difference(self.memory.memory_variables)
        inputs = {next(iter(candidate_keys)): inputs}
    if self.memory is not None:
        inputs = dict(inputs, **self.memory.load_memory_variables(inputs))
    return inputs
async def aprep_inputs(self, inputs: dict[str, Any] | Any) -> dict[str, str]:
    """Prepare chain inputs, including adding inputs from memory.

    Async counterpart of `prep_inputs`.

    Args:
        inputs: Dictionary of raw inputs, or single input if chain expects
            only one param. Should contain all inputs specified in
            `Chain.input_keys` except for inputs that will be set by the chain's
            memory.

    Returns:
        A dictionary of all inputs, including those added by the chain's memory.
    """
    if not isinstance(inputs, dict):
        candidate_keys = set(self.input_keys)
        if self.memory is not None:
            # Memory may supply some declared inputs; whatever remains must be
            # the single key the caller's bare value maps to.
            candidate_keys = candidate_keys.difference(self.memory.memory_variables)
        inputs = {next(iter(candidate_keys)): inputs}
    if self.memory is not None:
        inputs = dict(inputs, **await self.memory.aload_memory_variables(inputs))
    return inputs
@property
def _run_output_key(self) -> str:
    """Return the single output key required by `run`/`arun`.

    Raises:
        ValueError: If the chain declares zero or multiple output keys.
    """
    output_keys = self.output_keys
    if len(output_keys) == 1:
        return output_keys[0]
    msg = (
        f"`run` not supported when there is not exactly "
        f"one output key. Got {output_keys}."
    )
    raise ValueError(msg)
@deprecated("0.1.0", alternative="invoke", removal="1.0")
def run(
    self,
    *args: Any,
    callbacks: Callbacks = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Any:
    """Convenience method for executing chain.

    Unlike `Chain.__call__`, which takes a single input dictionary, this method
    accepts the chain inputs directly: either one positional argument (for a
    single-input chain) or keyword arguments (for a multi-input chain) — never
    both.

    Args:
        *args: If the chain expects a single input, it can be passed in as the
            sole positional argument.
        callbacks: Callbacks to use for this chain run. These will be called in
            addition to callbacks passed to the chain during construction, but only
            these runtime callbacks will propagate to calls to other objects.
        tags: List of string tags to pass to all callbacks. These will be passed in
            addition to tags passed to the chain during construction, but only
            these runtime tags will propagate to calls to other objects.
        metadata: Optional metadata associated with the chain.
        **kwargs: If the chain expects multiple inputs, they can be passed in
            directly as keyword arguments.

    Returns:
        The chain output.

    Example:
        ```python
        chain.run("What's the temperature in Boise, Idaho?")
        chain.run(question=question, context=context)
        ```
    """
    # Resolve the single output key up front so a misconfigured chain
    # fails fast, before any work is done.
    _output_key = self._run_output_key
    if args and kwargs:
        msg = (
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )
        raise ValueError(msg)
    if not args and not kwargs:
        msg = (
            "`run` supported with either positional arguments or keyword arguments,"
            " but none were provided."
        )
        raise ValueError(msg)
    if args:
        if len(args) != 1:
            msg = "`run` supports only one positional argument."
            raise ValueError(msg)
        result = self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)
    else:
        result = self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)
    return result[_output_key]
@deprecated("0.1.0", alternative="ainvoke", removal="1.0")
async def arun(
    self,
    *args: Any,
    callbacks: Callbacks = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Any:
    """Convenience method for executing chain.

    The main difference between this method and `Chain.__call__` is that this
    method expects inputs to be passed directly in as positional arguments or
    keyword arguments, whereas `Chain.__call__` expects a single input dictionary
    with all the inputs

    Args:
        *args: If the chain expects a single input, it can be passed in as the
            sole positional argument.
        callbacks: Callbacks to use for this chain run. These will be called in
            addition to callbacks passed to the chain during construction, but only
            these runtime callbacks will propagate to calls to other objects.
        tags: List of string tags to pass to all callbacks. These will be passed in
            addition to tags passed to the chain during construction, but only
            these runtime tags will propagate to calls to other objects.
        metadata: Optional metadata associated with the chain.
        **kwargs: If the chain expects multiple inputs, they can be passed in
            directly as keyword arguments.

    Returns:
        The chain output.

    Example:
        ```python
        await chain.arun("What's the temperature in Boise, Idaho?")
        await chain.arun(question=question, context=context)
        ```
    """
    # Consistency fix: reuse the shared `_run_output_key` property (same
    # validation the sync `run` performs) instead of duplicating the check.
    _output_key = self._run_output_key
    if args and not kwargs:
        if len(args) != 1:
            msg = "`run` supports only one positional argument."
            raise ValueError(msg)
        return (
            await self.acall(
                args[0],
                callbacks=callbacks,
                tags=tags,
                metadata=metadata,
            )
        )[_output_key]
    if kwargs and not args:
        return (
            await self.acall(
                kwargs,
                callbacks=callbacks,
                tags=tags,
                metadata=metadata,
            )
        )[_output_key]
    if not kwargs and not args:
        # Bug fix: previously a no-argument call fell through to the
        # "but not both" error with empty args/kwargs, which was misleading.
        # Mirror the sync `run` behavior instead.
        msg = (
            "`run` supported with either positional arguments or keyword arguments,"
            " but none were provided."
        )
        raise ValueError(msg)
    msg = (
        f"`run` supported with either positional arguments or keyword arguments"
        f" but not both. Got args: {args} and kwargs: {kwargs}."
    )
    raise ValueError(msg)
def dict(self, **kwargs: Any) -> dict:
    """Dictionary representation of chain.

    Expects `Chain._chain_type` property to be implemented and for memory to be
    null.

    Args:
        **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
            method.

    Returns:
        A dictionary representation of the chain.

    Example:
        ```python
        chain.model_dump(exclude_unset=True)
        # -> {"_type": "foo", "verbose": False, ...}
        ```
    """
    serialized = super().model_dump(**kwargs)
    # `_chain_type` may be unimplemented on abstract chains; omit the tag then.
    try:
        serialized["_type"] = self._chain_type
    except NotImplementedError:
        pass
    return serialized
def save(self, file_path: Path | str) -> None:
    """Save the chain to a JSON or YAML file.

    Expects `Chain._chain_type` property to be implemented and for memory to be
    null.

    Args:
        file_path: Path to file to save the chain to.

    Example:
        ```python
        chain.save(file_path="path/chain.yaml")
        ```
    """
    if self.memory is not None:
        msg = "Saving of memory is not yet supported."
        raise ValueError(msg)
    # Serialize first so unsupported chains fail before touching the disk.
    chain_dict = self.model_dump()
    if "_type" not in chain_dict:
        msg = f"Chain {self} does not support saving."
        raise NotImplementedError(msg)
    # Path(Path(...)) is a no-op, so this normalizes str and Path alike.
    save_path = Path(file_path)
    save_path.parent.mkdir(parents=True, exist_ok=True)
    suffix = save_path.suffix
    if suffix == ".json":
        with save_path.open("w") as f:
            json.dump(chain_dict, f, indent=4)
    elif suffix.endswith((".yaml", ".yml")):
        with save_path.open("w") as f:
            yaml.dump(chain_dict, f, default_flow_style=False)
    else:
        msg = f"{save_path} must be json or yaml"
        raise ValueError(msg)
@deprecated("0.1.0", alternative="batch", removal="1.0")
def apply(
    self,
    input_list: list[builtins.dict[str, Any]],
    callbacks: Callbacks = None,
) -> list[builtins.dict[str, str]]:
    """Call the chain on all inputs in the list, returning one output dict each."""
    results = []
    for chain_inputs in input_list:
        results.append(self(chain_inputs, callbacks=callbacks))
    return results
|
Chain
|
python
|
allegroai__clearml
|
clearml/backend_interface/task/repo/scriptinfo.py
|
{
"start": 832,
"end": 12902
}
|
class ____(object):
    """Auto-generate ``requirements.txt`` content for a repository.

    Statically scans the repo's imports (via the vendored ``pigar``) and matches
    them against the packages installed in the current environment, producing
    both a pip-style requirements text and, when running inside a conda
    environment, a conda requirements text.
    """

    # Lazily-resolved config flag: include per-file import details in the output.
    _detailed_import_report = deferred_config("development.detailed_import_report", False)
    # Hard cap (bytes) on the generated requirements text; above it the
    # detailed-analysis section is dropped.
    _max_requirements_size = 512 * 1024
    # Packages always listed without a pinned version.
    _packages_remove_version = ("setuptools",)
    # Package names to omit entirely from the generated requirements.
    _ignore_packages = set()

    @classmethod
    def _get_logger(cls) -> logging.Logger:
        """Return the shared "Repository Detection" logger."""
        return get_logger("Repository Detection")

    def __init__(self, root_folder: str) -> None:
        # root_folder: repository root directory that will be scanned for imports.
        self._root_folder = root_folder

    def get_requirements(
        self,
        entry_point_filename: Optional[str] = None,
        add_missing_installed_packages: bool = False,
        detailed_req_report: Optional[bool] = None,
    ) -> Tuple[str, str]:
        """Scan the repository and return ``(pip_requirements, conda_requirements)``.

        Any failure is logged and reported as a pair of empty strings —
        requirement detection is best-effort and must never break the caller.
        """
        # noinspection PyBroadException
        try:
            from ....utilities.pigar.reqs import get_installed_pkgs_detail
            from ....utilities.pigar.__main__ import GenerateReqs

            installed_pkgs = self._remove_package_versions(get_installed_pkgs_detail(), self._packages_remove_version)
            gr = GenerateReqs(
                save_path="",
                project_path=self._root_folder,
                installed_pkgs=installed_pkgs,
                # Directories excluded from the static import scan.
                ignores=[
                    ".git",
                    ".hg",
                    ".idea",
                    "__pycache__",
                    ".ipynb_checkpoints",
                    "site-packages",
                    "dist-packages",
                ],
            )
            reqs, try_imports, guess, local_pks = gr.extract_reqs(
                module_callback=ScriptRequirements.add_trains_used_packages,
                entry_point_filename=entry_point_filename,
            )
            if add_missing_installed_packages and guess:
                # Merge best-guess packages that the scan did not already detect.
                for k in guess:
                    if k not in reqs:
                        reqs[k] = guess[k]
            return self.create_requirements_txt(reqs, local_pks, detailed=detailed_req_report)
        except Exception as ex:
            self._get_logger().warning("Failed auto-generating package requirements: {}".format(ex))
            return "", ""

    @staticmethod
    def add_trains_used_packages(modules: Any) -> Any:
        """pigar module callback: patch the detected module set.

        Forces in storage-backend packages that clearml may use indirectly,
        renames packages whose import name differs from their PyPI name, and
        injects `Task._force_requirements` entries.
        """
        # hack: forcefully insert storage modules if we have them
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements,PyUnresolvedReferences
            import boto3  # noqa: F401

            modules.add("boto3", "clearml.storage", 0)
        except Exception:
            pass
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements,PyUnresolvedReferences
            from google.cloud import storage  # noqa: F401

            modules.add("google_cloud_storage", "clearml.storage", 0)
        except Exception:
            pass
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements,PyUnresolvedReferences
            from azure.storage.blob import ContentSettings  # noqa: F401

            modules.add("azure_storage_blob", "clearml.storage", 0)
        except Exception:
            pass
        # bugfix, replace sklearn with scikit-learn name
        if "sklearn" in modules:
            sklearn = modules.pop("sklearn", {})
            for fname, lines in sklearn.items():
                modules.add("scikit_learn", fname, lines)
        # bugfix, replace sklearn with scikit-learn name
        if "skimage" in modules:
            skimage = modules.pop("skimage", {})
            for fname, lines in skimage.items():
                modules.add("scikit_image", fname, lines)
        # "tensorflow-intel" is a platform wheel; record it under "tensorflow".
        if "tensorflow-intel" in modules:
            tfmodule = modules.pop("tensorflow-intel", {})
            for fname, lines in tfmodule.items():
                modules.add("tensorflow", fname, lines)
        # if we have torch, and it supports tensorboard, we should add that as well
        # (because it will not be detected automatically)
        if "torch" in modules and "tensorboard" not in modules and "tensorboardX" not in modules:
            # noinspection PyBroadException
            try:
                # see if this version of torch support tensorboard
                # noinspection PyPackageRequirements,PyUnresolvedReferences
                import torch.utils.tensorboard  # noqa: F401

                # noinspection PyPackageRequirements,PyUnresolvedReferences
                import tensorboard  # noqa: F401

                modules.add("tensorboard", "torch", 0)
            except Exception:
                pass
        # remove setuptools, we should not specify this module version. It is installed by default
        if "setuptools" in modules:
            modules.pop("setuptools", {})
        # add forced requirements:
        # noinspection PyBroadException
        try:
            from ..task import Task

            # noinspection PyProtectedMember
            for package, version in Task._force_requirements.items():
                # NOTE: the forced version is applied later, in
                # create_requirements_txt; here we only register the package.
                modules.add(package, "clearml", 0)
        except Exception:
            pass
        return modules

    @staticmethod
    def create_requirements_txt(
        reqs: Dict[str, Any],
        local_pks: Optional[Any] = None,
        detailed: Optional[bool] = None,
    ) -> Tuple[str, str]:
        """Render detected requirements into ``(pip_text, conda_text)``.

        Args:
            reqs: Mapping of package name to a pigar requirement object
                (exposing ``.version`` and ``.comments``).
            local_pks: Local (in-repo) modules, listed only as comments.
            detailed: Include per-file import analysis; defaults to the
                ``development.detailed_import_report`` config value.
        """
        # write requirements.txt
        if detailed is None:
            detailed = ScriptRequirements._detailed_import_report
        # noinspection PyBroadException
        try:
            conda_requirements = ""
            conda_prefix = os.environ.get("CONDA_PREFIX")
            if conda_prefix and not conda_prefix.endswith(os.path.sep):
                conda_prefix += os.path.sep
            # Only emit conda requirements when the running interpreter itself
            # lives inside the active conda environment.
            if conda_prefix and sys.executable.startswith(conda_prefix):
                conda_packages_json = get_command_output(["conda", "list", "--json"])
                conda_packages_json = json.loads(conda_packages_json)
                reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
                for r in conda_packages_json:
                    # the exception is cudatoolkit which we want to log anyhow
                    if r.get("name") == "cudatoolkit" and r.get("version"):
                        conda_requirements += "{0} {1} {2}\n".format(r.get("name"), "==", r.get("version"))
                        continue
                    # check if this is a pypi package, if it is, leave it outside
                    if not r.get("channel") or r.get("channel") == "pypi":
                        continue
                    # check if we have it in our required packages
                    name = r["name"].lower()
                    # hack support pytorch/torch different naming convention
                    if name == "pytorch":
                        name = "torch"
                    k, v = None, None
                    if name in reqs_lower:
                        k, v = reqs_lower.get(name, (None, None))
                    else:
                        # conda uses dashes where pip packages may use underscores.
                        name = name.replace("-", "_")
                        if name in reqs_lower:
                            k, v = reqs_lower.get(name, (None, None))
                    if k and v is not None:
                        if v.version:
                            conda_requirements += "{0} {1} {2}\n".format(k, "==", v.version)
                        else:
                            conda_requirements += "{0}\n".format(k)
        except Exception:
            # Best-effort: any conda probing failure yields no conda section.
            conda_requirements = ""
        # add forced requirements:
        forced_packages = {}
        ignored_packages = ScriptRequirements._ignore_packages
        # noinspection PyBroadException
        try:
            from ..task import Task

            # noinspection PyProtectedMember
            forced_packages = copy(Task._force_requirements)
            # noinspection PyProtectedMember
            ignored_packages = Task._ignore_requirements | ignored_packages
        except Exception:
            pass
        # python version header
        requirements_txt = "# Python " + sys.version.replace("\n", " ").replace("\r", " ") + "\n"
        if local_pks:
            requirements_txt += "\n# Local modules found - skipping:\n"
            for k, v in local_pks.sorted_items():
                if v.version:
                    requirements_txt += "# {0} == {1}\n".format(k, v.version)
                else:
                    requirements_txt += "# {0}\n".format(k)
        # requirement summary
        requirements_txt += "\n"
        for k, v in reqs.sorted_items():
            if k in ignored_packages or k.lower() in ignored_packages:
                continue
            version = v.version if v else None
            if k in forced_packages:
                # A forced version overrides whatever was detected; pop so the
                # leftover forced packages can be appended below.
                forced_version = forced_packages.pop(k, None)
                if forced_version is not None:
                    version = forced_version
            # requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
            requirements_txt += ScriptRequirements._make_req_line(k, version or None)
        # add forced requirements that we could not find installed on the system
        for k in sorted(forced_packages.keys()):
            requirements_txt += ScriptRequirements._make_req_line(k, forced_packages.get(k))
        requirements_txt_packages_only = requirements_txt
        if detailed:
            # Fallback text used when the detailed report exceeds the size cap.
            requirements_txt_packages_only = (
                requirements_txt + "\n# Skipping detailed import analysis, it is too large\n"
            )
            # requirements details (in comments)
            requirements_txt += "\n# Detailed import analysis\n# **************************\n"
            if local_pks:
                for k, v in local_pks.sorted_items():
                    requirements_txt += "\n"
                    requirements_txt += "# IMPORT LOCAL PACKAGE {0}\n".format(k)
                    requirements_txt += "".join(["# {0}\n".format(c) for c in v.comments.sorted_items()])
            for k, v in reqs.sorted_items():
                if not v:
                    continue
                requirements_txt += "\n"
                if k == "-e":
                    requirements_txt += "# IMPORT PACKAGE {0} {1}\n".format(k, v.version)
                else:
                    requirements_txt += "# IMPORT PACKAGE {0}\n".format(k)
                requirements_txt += "".join(["# {0}\n".format(c) for c in v.comments.sorted_items()])
        # make sure we do not exceed the size a size limit
        return (
            requirements_txt
            if len(requirements_txt) < ScriptRequirements._max_requirements_size
            else requirements_txt_packages_only,
            conda_requirements,
        )

    @staticmethod
    def _make_req_line(k: str, version: str) -> str:
        """Format one requirements.txt line for package *k* at *version*.

        Handles the pigar "-e" editable-install pseudo-package and both
        pinned ("pkg == 1.2") and unpinned forms.
        """
        requirements_txt = ""
        if k == "-e" and version:
            requirements_txt += "{0}\n".format(version)
        elif k.startswith("-e "):
            requirements_txt += "{0} {1}\n".format(k.replace("-e ", "", 1), version or "")
        elif version and str(version or " ").strip()[0].isdigit():
            # Plain numeric version string -> pin exactly.
            requirements_txt += "{0} {1} {2}\n".format(k, "==", version)
        elif version and str(version).strip():
            # Version already carries an operator (e.g. ">=1.0") -> pass through.
            requirements_txt += "{0} {1}\n".format(k, version)
        else:
            requirements_txt += "{0}\n".format(k)
        return requirements_txt

    @staticmethod
    def _remove_package_versions(
        installed_pkgs: Dict[str, Union[Tuple[str, Optional[str]], Dict[str, Any]]],
        package_names_to_remove_version: Tuple[str],
    ) -> Dict[str, Union[Tuple[str, Optional[str]], Dict[str, Any]]]:
        """Blank out the version component for the named packages, recursing
        into nested dict entries (pigar groups packages hierarchically)."""

        def _internal(
            _installed_pkgs: Dict[str, Union[Tuple[str, Optional[str]], Dict[str, Any]]]
        ) -> Dict[str, Union[Tuple[str, Optional[str]], Dict[str, Any]]]:
            return {
                k: (v[0], None if str(k) in package_names_to_remove_version else v[1])
                if not isinstance(v, dict)
                else _internal(v)
                for k, v in _installed_pkgs.items()
            }

        return _internal(installed_pkgs)
|
ScriptRequirements
|
python
|
getsentry__sentry
|
src/sentry/integrations/api/serializers/models/external_actor.py
|
{
"start": 711,
"end": 2196
}
|
class ____(Serializer):
    """Serialize `ExternalActor` rows into API response dictionaries."""

    def get_attrs(
        self,
        item_list: Sequence[ExternalActor],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> MutableMapping[ExternalActor, MutableMapping[str, Any]]:
        # Map each external actor to the single owner attribute it carries:
        # {"team": team_id} when a team is linked, otherwise {"user": user_id}.
        attrs: MutableMapping[ExternalActor, MutableMapping[str, Any]] = {}
        for external_actor in item_list:
            if external_actor.team_id is not None:
                attrs[external_actor] = {"team": external_actor.team_id}
            else:
                attrs[external_actor] = {"user": external_actor.user_id}
        return attrs

    def serialize(
        self,
        obj: ExternalActor,
        attrs: Mapping[str, Any],
        user: User | RpcUser | AnonymousUser,
        key: str | None = None,
        **kwargs: Any,
    ) -> ExternalActorResponse:
        data: ExternalActorResponse = {
            "id": str(obj.id),
            "provider": get_provider_string(obj.provider),
            "externalName": obj.external_name,
            "integrationId": str(obj.integration_id),
        }
        if obj.external_id:
            data["externalId"] = obj.external_id
        # Extra context `key` tells the API how to resolve actor_id.
        if key == "user":
            data["userId"] = str(attrs[key])
        elif key == "team":
            data["teamId"] = str(attrs[key])
        return data
|
ExternalActorSerializer
|
python
|
etianen__django-reversion
|
tests/test_app/models.py
|
{
"start": 1414,
"end": 1536
}
|
class ____(models.Model):
    """Minimal related model used as a registration/relation target in the
    django-reversion test suite."""

    # Short free-text label; defaults to "v1" so fixtures need no explicit value.
    name = models.CharField(
        max_length=191,
        default="v1",
    )
|
TestModelRelated
|
python
|
plotly__plotly.py
|
_plotly_utils/basevalidators.py
|
{
"start": 76832,
"end": 77446
}
|
class ____(CompoundValidator):
    """
    This is a special validator to allow compound title properties
    (e.g. layout.title, layout.xaxis.title, etc.) to be set as strings
    or numbers. These strings are mapped to the 'text' property of the
    compound validator.
    """

    def __init__(self, *args, **kwargs):
        super(TitleValidator, self).__init__(*args, **kwargs)

    def validate_coerce(self, v, skip_invalid=False):
        # A bare scalar is shorthand for {'text': value}; wrap it before
        # delegating to the regular compound validation.
        coerced = {"text": v} if isinstance(v, (str, int, float)) else v
        return super(TitleValidator, self).validate_coerce(coerced, skip_invalid=skip_invalid)
|
TitleValidator
|
python
|
chroma-core__chroma
|
chromadb/test/conftest.py
|
{
"start": 27688,
"end": 27770
}
|
class ____(AsyncClientCreator):
    # Marker subclass: identical to AsyncClientCreator; exists so the
    # async-to-sync conversion machinery can produce a distinct sync variant.
    pass
@async_class_to_sync
|
AsyncClientCreatorSync
|
python
|
PyCQA__pylint
|
tests/functional/m/member/member_checks.py
|
{
"start": 1995,
"end": 2062
}
|
# Intentionally empty fixture for the pylint no-member functional test.
class ____:
    """No no-member should be emitted for mixins."""
|
Mixin
|
python
|
keras-team__keras
|
keras/src/optimizers/adamw_test.py
|
{
"start": 170,
"end": 3540
}
|
class ____(testing.TestCase):
    """Unit tests for the AdamW optimizer (Adam with decoupled weight decay)."""

    def test_config(self):
        # Round-trip serialization must preserve all constructor arguments.
        optimizer = AdamW(
            learning_rate=0.5,
            weight_decay=0.008,
            beta_1=0.5,
            beta_2=0.67,
            epsilon=1e-5,
            amsgrad=True,
        )
        self.run_class_serialization_test(optimizer)

    def test_single_step(self):
        # One gradient application; expected values are precomputed by hand.
        optimizer = AdamW(learning_rate=0.5)
        grads = ops.array([1.0, 6.0, 7.0, 2.0])
        vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
        optimizer.apply_gradients(zip([grads], [vars]))
        self.assertAllClose(
            vars, [0.4980, 1.4960, 2.494, 3.492], rtol=1e-4, atol=1e-4
        )

    def test_weight_decay(self):
        # Zero gradients isolate the pure weight-decay effect; variables
        # excluded by name or by reference must remain untouched.
        grads, var1, var2, var3 = (
            ops.zeros(()),
            backend.Variable(2.0),
            backend.Variable(2.0, name="exclude"),
            backend.Variable(2.0),
        )
        optimizer_1 = AdamW(learning_rate=1.0, weight_decay=0.004)
        optimizer_1.apply_gradients(zip([grads], [var1]))

        optimizer_2 = AdamW(learning_rate=1.0, weight_decay=0.004)
        optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
        optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))

        optimizer_3 = AdamW(learning_rate=1.0, weight_decay=0.004)
        optimizer_3.exclude_from_weight_decay(var_list=[var3])
        optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))

        self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
        self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
        self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)

    def test_weight_decay_is_none(self):
        # weight_decay=None must be rejected with a clear error message.
        with self.assertRaisesRegex(
            ValueError,
            "Argument `weight_decay` must be a float. "
            "Received: weight_decay=None",
        ):
            AdamW(learning_rate=1.0, weight_decay=None)

    def test_correctness_with_golden(self):
        # Compare five update steps against golden values (reference output).
        optimizer = AdamW(learning_rate=1.0, weight_decay=0.5, epsilon=2)

        x = backend.Variable(np.ones([10], dtype="float32"))
        grads = ops.arange(0.1, 1.1, 0.1)
        first_grads = ops.full((10,), 0.01)

        # fmt: off
        golden = np.array(
            [[0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998],
             [0.2486, 0.2475, 0.2463, 0.2451, 0.244, 0.2428, 0.2417, 0.2405, 0.2394, 0.2382],
             [0.1223, 0.1198, 0.1174, 0.1149, 0.1124, 0.11, 0.1075, 0.1051, 0.1027, 0.1003],
             [0.0586, 0.0549, 0.0512, 0.0475, 0.0439, 0.0402, 0.0366, 0.033, 0.0294, 0.0258],
             [0.0263, 0.0215, 0.0167, 0.012, 0.0073, 0.0026, -0.0021, -0.0067, -0.0113, -0.0159]]
        )
        # fmt: on

        optimizer.apply_gradients(zip([first_grads], [x]))
        for i in range(5):
            # Check BEFORE applying: golden[i] is the state after i+1 updates.
            self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
            optimizer.apply_gradients(zip([grads], [x]))

    def test_clip_norm(self):
        # clipnorm=1 rescales [100, 100] onto the unit-norm sphere.
        optimizer = AdamW(clipnorm=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])

    def test_clip_value(self):
        # clipvalue=1 clamps each component independently.
        optimizer = AdamW(clipvalue=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
AdamWTest
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/rich/_win32_console.py
|
{
"start": 10432,
"end": 22800
}
|
class ____:
    """This class allows interaction with the legacy Windows Console API. It should only be used in the context
    of environments where virtual terminal processing is not available. However, if it is used in a Windows environment,
    the entire API should work.

    Args:
        file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
    """

    # Bit that switches a Windows console color to its "bright" variant.
    BRIGHT_BIT = 8

    # Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
    ANSI_TO_WINDOWS = [
        0,  # black                      The Windows colours are defined in wincon.h as follows:
        4,  # red                         define FOREGROUND_BLUE            0x0001 -- 0000 0001
        2,  # green                       define FOREGROUND_GREEN           0x0002 -- 0000 0010
        6,  # yellow                      define FOREGROUND_RED             0x0004 -- 0000 0100
        1,  # blue                        define FOREGROUND_INTENSITY       0x0008 -- 0000 1000
        5,  # magenta                     define BACKGROUND_BLUE            0x0010 -- 0001 0000
        3,  # cyan                        define BACKGROUND_GREEN           0x0020 -- 0010 0000
        7,  # white                       define BACKGROUND_RED             0x0040 -- 0100 0000
        8,  # bright black (grey)         define BACKGROUND_INTENSITY       0x0080 -- 1000 0000
        12,  # bright red
        10,  # bright green
        14,  # bright yellow
        9,  # bright blue
        13,  # bright magenta
        11,  # bright cyan
        15,  # bright white
    ]

    def __init__(self, file: "IO[str]") -> None:
        handle = GetStdHandle(STDOUT)
        self._handle = handle
        # Capture the console's current attribute word so styled writes can be
        # reverted, and split it into foreground (low 3 bits) / background.
        default_text = GetConsoleScreenBufferInfo(handle).wAttributes
        self._default_text = default_text

        self._default_fore = default_text & 7
        self._default_back = (default_text >> 4) & 7
        self._default_attrs = self._default_fore | (self._default_back << 4)

        self._file = file
        # Delegate raw writes/flushes straight to the underlying file object.
        self.write = file.write
        self.flush = file.flush

    @property
    def cursor_position(self) -> WindowsCoordinates:
        """Returns the current position of the cursor (0-based)

        Returns:
            WindowsCoordinates: The current cursor position.
        """
        coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
        return WindowsCoordinates(row=coord.Y, col=coord.X)

    @property
    def screen_size(self) -> WindowsCoordinates:
        """Returns the current size of the console screen buffer, in character columns and rows

        Returns:
            WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
        """
        screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
        return WindowsCoordinates(row=screen_size.Y, col=screen_size.X)

    def write_text(self, text: str) -> None:
        """Write text directly to the terminal without any modification of styles

        Args:
            text (str): The text to write to the console
        """
        self.write(text)
        self.flush()

    def write_styled(self, text: str, style: Style) -> None:
        """Write styled text to the terminal.

        Args:
            text (str): The text to write
            style (Style): The style of the text
        """
        color = style.color
        bgcolor = style.bgcolor
        if style.reverse:
            color, bgcolor = bgcolor, color

        if color:
            fore = color.downgrade(ColorSystem.WINDOWS).number
            fore = fore if fore is not None else 7  # Default to ANSI 7: White
            if style.bold:
                fore = fore | self.BRIGHT_BIT
            if style.dim:
                fore = fore & ~self.BRIGHT_BIT
            fore = self.ANSI_TO_WINDOWS[fore]
        else:
            fore = self._default_fore

        if bgcolor:
            back = bgcolor.downgrade(ColorSystem.WINDOWS).number
            back = back if back is not None else 0  # Default to ANSI 0: Black
            back = self.ANSI_TO_WINDOWS[back]
        else:
            back = self._default_back

        assert fore is not None
        assert back is not None

        # Apply the combined attribute word, write, then restore the defaults
        # so subsequent unstyled writes are unaffected.
        SetConsoleTextAttribute(
            self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
        )
        self.write_text(text)
        SetConsoleTextAttribute(self._handle, attributes=self._default_text)

    def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
        """Set the position of the cursor

        Args:
            new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
        """
        # Negative coordinates are invalid for the console API; ignore them.
        if new_position.col < 0 or new_position.row < 0:
            return
        SetConsoleCursorPosition(self._handle, coords=new_position)

    def erase_line(self) -> None:
        """Erase all content on the line the cursor is currently located at"""
        screen_size = self.screen_size
        cursor_position = self.cursor_position
        cells_to_erase = screen_size.col
        start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
        # Blank the characters and reset their attributes to the defaults.
        FillConsoleOutputCharacter(
            self._handle, " ", length=cells_to_erase, start=start_coordinates
        )
        FillConsoleOutputAttribute(
            self._handle,
            self._default_attrs,
            length=cells_to_erase,
            start=start_coordinates,
        )

    def erase_end_of_line(self) -> None:
        """Erase all content from the cursor position to the end of that line"""
        cursor_position = self.cursor_position
        cells_to_erase = self.screen_size.col - cursor_position.col
        FillConsoleOutputCharacter(
            self._handle, " ", length=cells_to_erase, start=cursor_position
        )
        FillConsoleOutputAttribute(
            self._handle,
            self._default_attrs,
            length=cells_to_erase,
            start=cursor_position,
        )

    def erase_start_of_line(self) -> None:
        """Erase all content from the cursor position to the start of that line"""
        row, col = self.cursor_position
        start = WindowsCoordinates(row, 0)
        FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
        FillConsoleOutputAttribute(
            self._handle, self._default_attrs, length=col, start=start
        )

    def move_cursor_up(self) -> None:
        """Move the cursor up a single cell"""
        cursor_position = self.cursor_position
        SetConsoleCursorPosition(
            self._handle,
            coords=WindowsCoordinates(
                row=cursor_position.row - 1, col=cursor_position.col
            ),
        )

    def move_cursor_down(self) -> None:
        """Move the cursor down a single cell"""
        cursor_position = self.cursor_position
        SetConsoleCursorPosition(
            self._handle,
            coords=WindowsCoordinates(
                row=cursor_position.row + 1,
                col=cursor_position.col,
            ),
        )

    def move_cursor_forward(self) -> None:
        """Move the cursor forward a single cell. Wrap to the next line if required."""
        row, col = self.cursor_position
        if col == self.screen_size.col - 1:
            # At the right edge: wrap to the start of the next line.
            row += 1
            col = 0
        else:
            col += 1
        SetConsoleCursorPosition(
            self._handle, coords=WindowsCoordinates(row=row, col=col)
        )

    def move_cursor_to_column(self, column: int) -> None:
        """Move cursor to the column specified by the zero-based column index, staying on the same row

        Args:
            column (int): The zero-based column index to move the cursor to.
        """
        row, _ = self.cursor_position
        SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))

    def move_cursor_backward(self) -> None:
        """Move the cursor backward a single cell. Wrap to the previous line if required."""
        row, col = self.cursor_position
        if col == 0:
            # At the left edge: wrap to the end of the previous line.
            row -= 1
            col = self.screen_size.col - 1
        else:
            col -= 1
        SetConsoleCursorPosition(
            self._handle, coords=WindowsCoordinates(row=row, col=col)
        )

    def hide_cursor(self) -> None:
        """Hide the cursor"""
        # Preserve the current cursor size; only toggle visibility.
        current_cursor_size = self._get_cursor_size()
        invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
        SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)

    def show_cursor(self) -> None:
        """Show the cursor"""
        current_cursor_size = self._get_cursor_size()
        visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
        SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)

    def set_title(self, title: str) -> None:
        """Set the title of the terminal window

        Args:
            title (str): The new title of the console window
        """
        assert len(title) < 255, "Console title must be less than 255 characters"
        SetConsoleTitle(title)

    def _get_cursor_size(self) -> int:
        """Get the percentage of the character cell that is filled by the cursor"""
        cursor_info = CONSOLE_CURSOR_INFO()
        GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
        return int(cursor_info.dwSize)
if __name__ == "__main__":
handle = GetStdHandle()
from pipenv.patched.pip._vendor.rich.console import Console
console = Console()
term = LegacyWindowsTerm(sys.stdout)
term.set_title("Win32 Console Examples")
style = Style(color="black", bgcolor="red")
heading = Style.parse("black on green")
# Check colour output
console.rule("Checking colour output")
console.print("[on red]on red!")
console.print("[blue]blue!")
console.print("[yellow]yellow!")
console.print("[bold yellow]bold yellow!")
console.print("[bright_yellow]bright_yellow!")
console.print("[dim bright_yellow]dim bright_yellow!")
console.print("[italic cyan]italic cyan!")
console.print("[bold white on blue]bold white on blue!")
console.print("[reverse bold white on blue]reverse bold white on blue!")
console.print("[bold black on cyan]bold black on cyan!")
console.print("[black on green]black on green!")
console.print("[blue on green]blue on green!")
console.print("[white on black]white on black!")
console.print("[black on white]black on white!")
console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
# Check cursor movement
console.rule("Checking cursor movement")
console.print()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("went back and wrapped to prev line")
time.sleep(1)
term.move_cursor_up()
term.write_text("we go up")
time.sleep(1)
term.move_cursor_down()
term.write_text("and down")
time.sleep(1)
term.move_cursor_up()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went up and back 2")
time.sleep(1)
term.move_cursor_down()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went down and back 2")
time.sleep(1)
# Check erasing of lines
term.hide_cursor()
console.print()
console.rule("Checking line erasing")
console.print("\n...Deleting to the start of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled("<", Style.parse("black on red"))
term.move_cursor_backward()
time.sleep(1)
term.erase_start_of_line()
time.sleep(1)
console.print("\n\n...And to the end of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled(">", Style.parse("black on red"))
time.sleep(1)
term.erase_end_of_line()
time.sleep(1)
console.print("\n\n...Now the whole line will be erased...")
term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
time.sleep(1)
term.erase_line()
term.show_cursor()
print("\n")
|
LegacyWindowsTerm
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/processors/test_data_condition_group.py
|
{
"start": 5051,
"end": 7371
}
|
class ____(TestEvaluationConditionCase):
def test_evaluate_data_conditions__passes_all(self) -> None:
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition,
result=DetectorPriorityLevel.HIGH,
),
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition_two,
result=DetectorPriorityLevel.LOW,
),
],
)
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(10),
self.data_condition_group.logic_type,
)
assert result == expected_result
def test_evaluate_data_conditions__passes_one(self) -> None:
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition_two,
result=DetectorPriorityLevel.LOW,
)
],
)
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(4),
self.data_condition_group.logic_type,
)
assert result == expected_result
def test_evaluate_data_conditions__fails_all(self) -> None:
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.FALSE,
condition_results=[],
)
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(1),
self.data_condition_group.logic_type,
)
assert result == expected_result
def test_evaluate_data_conditions__passes_without_conditions(self) -> None:
result = evaluate_data_conditions([], self.data_condition_group.logic_type)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[],
)
assert result == expected_result
|
TestEvaluateConditionGroupTypeAny
|
python
|
pola-rs__polars
|
py-polars/src/polars/_cpu_check.py
|
{
"start": 4491,
"end": 4648
}
|
class ____(ctypes.Structure):
_fields_: ClassVar[list[tuple[str, type]]] = [
(r, c_uint32) for r in ("eax", "ebx", "ecx", "edx")
]
|
CPUID_struct
|
python
|
getsentry__sentry
|
src/sentry/notifications/additional_attachment_manager.py
|
{
"start": 730,
"end": 2198
}
|
class ____:
def __init__(self) -> None:
self.attachment_generators: MutableMapping[ExternalProviders, GetAttachment] = {}
# need to update types for additional providers
def get_additional_attachment(
self,
integration: Integration | RpcIntegration,
organization: Organization | RpcOrganization,
) -> list[SlackBlock] | None:
# look up the generator by the provider but only accepting slack for now
provider = validate_provider(integration.provider, {ExternalProviders.SLACK})
attachment_generator = self.attachment_generators.get(provider)
if attachment_generator is None:
return None
return attachment_generator(integration, organization)
def register_additional_attachment_generator(
self,
provider: ExternalProviders,
) -> Callable[[GetAttachment], GetAttachment]:
if self.attachment_generators.get(provider):
raise AttachmentGeneratorAlreadySetException()
def wrapped(attachment_generator: GetAttachment) -> GetAttachment:
self.attachment_generators[provider] = attachment_generator
return attachment_generator
return wrapped
# make instance and export it
manager = AdditionalAttachmentManager()
register_additional_attachment_generator = manager.register_additional_attachment_generator
get_additional_attachment = manager.get_additional_attachment
|
AdditionalAttachmentManager
|
python
|
wandb__wandb
|
wandb/filesync/dir_watcher.py
|
{
"start": 2963,
"end": 6011
}
|
class ____(FileEventHandler):
"""Event handler that uploads respecting throttling.
Uploads files every RATE_LIMIT_SECONDS, which changes as the size increases to deal
with throttling.
"""
RATE_LIMIT_SECONDS = 15
unit_dict = dict(util.POW_10_BYTES)
# Wait to upload until size has increased 20% from last upload
RATE_LIMIT_SIZE_INCREASE = 1.2
def __init__(
self,
file_path: PathStr,
save_name: LogicalPath,
file_pusher: "FilePusher",
settings: Optional["SettingsStatic"] = None,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(file_path, save_name, file_pusher, *args, **kwargs)
self._last_uploaded_time: Optional[float] = None
self._last_uploaded_size: int = 0
if settings is not None:
if settings.x_live_policy_rate_limit is not None:
self.RATE_LIMIT_SECONDS = settings.x_live_policy_rate_limit
self._min_wait_time: Optional[float] = settings.x_live_policy_wait_time
else:
self._min_wait_time = None
@property
def current_size(self) -> int:
return os.path.getsize(self.file_path)
@classmethod
def min_wait_for_size(cls, size: int) -> float:
if size < 10 * cls.unit_dict["MB"]:
return 60
elif size < 100 * cls.unit_dict["MB"]:
return 5 * 60
elif size < cls.unit_dict["GB"]:
return 10 * 60
else:
return 20 * 60
def should_update(self) -> bool:
if self._last_uploaded_time is not None:
# Check rate limit by time elapsed
time_elapsed = time.time() - self._last_uploaded_time
# if more than 15 seconds has passed potentially upload it
if time_elapsed < self.RATE_LIMIT_SECONDS:
return False
# Check rate limit by size increase
if float(self._last_uploaded_size) > 0:
size_increase = self.current_size / float(self._last_uploaded_size)
if size_increase < self.RATE_LIMIT_SIZE_INCREASE:
return False
return time_elapsed > (
self._min_wait_time or self.min_wait_for_size(self.current_size)
)
# if the file has never been uploaded, we'll upload it
return True
def on_modified(self, force: bool = False) -> None:
if self.current_size == 0:
return
if self._last_sync == os.path.getmtime(self.file_path):
return
if force or self.should_update():
self.save_file()
def save_file(self) -> None:
self._last_sync = os.path.getmtime(self.file_path)
self._last_uploaded_time = time.time()
self._last_uploaded_size = self.current_size
self._file_pusher.file_changed(self.save_name, self.file_path)
def finish(self) -> None:
self.on_modified(force=True)
@property
def policy(self) -> "PolicyName":
return "live"
|
PolicyLive
|
python
|
spack__spack
|
lib/spack/spack/installer.py
|
{
"start": 109128,
"end": 109255
}
|
class ____(spack.error.InstallError):
"""Raised by install() when a package is only for external use."""
|
ExternalPackageError
|
python
|
numba__numba
|
numba/np/ufunc/ufuncbuilder.py
|
{
"start": 1661,
"end": 1995
}
|
class ____(TargetDescriptor):
options = UFuncTargetOptions
def __init__(self):
super().__init__('ufunc')
@property
def typing_context(self):
return cpu_target.typing_context
@property
def target_context(self):
return cpu_target.target_context
ufunc_target = UFuncTarget()
|
UFuncTarget
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_camellia.py
|
{
"start": 1275,
"end": 1766
}
|
class ____:
test_cbc = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "Camellia"),
["camellia-cbc.txt"],
lambda key, **kwargs: Camellia(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
Camellia(b"\x00" * 16), OFB(b"\x00" * 16)
),
skip_message="Does not support Camellia OFB",
)
|
TestCamelliaModeCBC
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_image13.py
|
{
"start": 315,
"end": 970
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image13.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row(1, 75)
worksheet.set_column("C:C", 32)
worksheet.insert_image(
"C2", self.image_dir + "logo.png", {"x_offset": 8, "y_offset": 5}
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 750317,
"end": 751286
}
|
class ____(DataSource):
"""
NamedData schema wrapper.
Parameters
----------
name : str
Provide a placeholder name and bind data at runtime.
New data may change the layout but Vega does not always resize the chart. To update
the layout when the data updates, set `autosize
<https://vega.github.io/vega-lite/docs/size.html#autosize>`__ or explicitly use
`view.resize <https://vega.github.io/vega/docs/api/view/#view_resize>`__.
format : dict, :class:`DataFormat`, :class:`CsvDataFormat`, :class:`DsvDataFormat`, :class:`JsonDataFormat`, :class:`TopoDataFormat`
An object that specifies the format for parsing the data.
"""
_schema = {"$ref": "#/definitions/NamedData"}
def __init__(
self,
name: Optional[str] = Undefined,
format: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(name=name, format=format, **kwds)
|
NamedData
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/metrics/column_aggregate_metrics/column_values_between_count.py
|
{
"start": 638,
"end": 9439
}
|
class ____(MetricProvider):
"""This metric is an aggregate helper for rare cases."""
metric_name = "column_values.between.count"
value_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901, PLR0912 # FIXME CoP
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
(
df,
_compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
val = df[accessor_domain_kwargs["column"]]
if min_value is not None and max_value is not None:
if strict_min and strict_max:
series = min_value < val < max_value
elif strict_min:
series = min_value < val <= max_value
elif strict_max:
series = min_value <= val < max_value
else:
series = min_value <= val <= max_value
elif min_value is None and max_value is not None:
if strict_max:
series = val < max_value
else:
series = val <= max_value
elif min_value is not None and max_value is None:
if strict_min:
series = min_value < val
else:
series = min_value <= val
else:
raise ValueError("unable to parse domain and value kwargs") # noqa: TRY003 # FIXME CoP
return np.count_nonzero(series)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy( # noqa: C901, PLR0912 # FIXME CoP
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
dialect_name = execution_engine.engine.dialect.name.lower()
if (
min_value
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=True)
) or (
min_value
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=True)
):
min_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=True
)
if (
min_value
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=False)
) or (
min_value
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=False)
):
min_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=False
)
if (
max_value
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=True)
) or (
max_value
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=True)
):
max_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=True
)
if (
max_value
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=False)
) or (
max_value
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=False)
):
max_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=False
)
(
selectable,
_compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = sa.column(accessor_domain_kwargs["column"]) # type: ignore[var-annotated] # FIXME CoP
if min_value is None:
if strict_max:
condition = column < max_value
else:
condition = column <= max_value
elif max_value is None:
if strict_min:
condition = column > min_value
else:
condition = column >= min_value
else: # noqa: PLR5501 # FIXME CoP
if strict_min and strict_max:
condition = sa.and_(column > min_value, column < max_value)
elif strict_min:
condition = sa.and_(column > min_value, column <= max_value)
elif strict_max:
condition = sa.and_(column >= min_value, column < max_value)
else:
condition = sa.and_(column >= min_value, column <= max_value)
return execution_engine.execute_query(
sa.select(sa.func.count()).select_from(selectable).where(condition) # type: ignore[arg-type] # FIXME CoP
).scalar()
@metric_value(engine=SparkDFExecutionEngine)
def _spark( # noqa: C901, PLR0912 # FIXME CoP
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
(
df,
_compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = F.col(accessor_domain_kwargs["column"])
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
if min_value is None:
if strict_max:
condition = column < F.lit(max_value)
else:
condition = column <= F.lit(max_value)
elif max_value is None:
if strict_min:
condition = column > F.lit(min_value)
else:
condition = column >= F.lit(min_value)
else: # noqa: PLR5501 # FIXME CoP
if strict_min and strict_max:
condition = (column > F.lit(min_value)) & (column < F.lit(max_value))
elif strict_min:
condition = (column > F.lit(min_value)) & (column <= F.lit(max_value))
elif strict_max:
condition = (column >= F.lit(min_value)) & (column < F.lit(max_value))
else:
condition = (column >= F.lit(min_value)) & (column <= F.lit(max_value))
return df.filter(condition).count()
|
ColumnValuesBetweenCount
|
python
|
sympy__sympy
|
sympy/parsing/sympy_parser.py
|
{
"start": 38120,
"end": 43313
}
|
class ____(ast.NodeTransformer):
operators = {
ast.Add: 'Add',
ast.Mult: 'Mul',
ast.Pow: 'Pow',
ast.Sub: 'Add',
ast.Div: 'Mul',
ast.BitOr: 'Or',
ast.BitAnd: 'And',
ast.BitXor: 'Xor',
}
functions = (
'Abs', 'im', 're', 'sign', 'arg', 'conjugate',
'acos', 'acot', 'acsc', 'asec', 'asin', 'atan',
'acosh', 'acoth', 'acsch', 'asech', 'asinh', 'atanh',
'cos', 'cot', 'csc', 'sec', 'sin', 'tan',
'cosh', 'coth', 'csch', 'sech', 'sinh', 'tanh',
'exp', 'ln', 'log', 'sqrt', 'cbrt',
)
relational_operators = {
ast.NotEq: 'Ne',
ast.Lt: 'Lt',
ast.LtE: 'Le',
ast.Gt: 'Gt',
ast.GtE: 'Ge',
ast.Eq: 'Eq'
}
def visit_Compare(self, node):
def reducer(acc, op_right):
result, left = acc
op, right = op_right
if op.__class__ not in self.relational_operators:
raise ValueError("Only equation or inequality operators are supported")
new = ast.Call(
func=ast.Name(
id=self.relational_operators[op.__class__], ctx=ast.Load()
),
args=[self.visit(left), self.visit(right)],
keywords=[ast.keyword(arg="evaluate", value=ast.Constant(value=False))],
)
return result + [new], right
args, _ = reduce(
reducer, zip(node.ops, node.comparators), ([], node.left)
)
if len(args) == 1:
return args[0]
return ast.Call(
func=ast.Name(id=self.operators[ast.BitAnd], ctx=ast.Load()),
args=args,
keywords=[ast.keyword(arg="evaluate", value=ast.Constant(value=False))],
)
def flatten(self, args, func):
result = []
for arg in args:
if isinstance(arg, ast.Call):
arg_func = arg.func
if isinstance(arg_func, ast.Call):
arg_func = arg_func.func
if arg_func.id == func:
result.extend(self.flatten(arg.args, func))
else:
result.append(arg)
else:
result.append(arg)
return result
def visit_BinOp(self, node):
if node.op.__class__ in self.operators:
sympy_class = self.operators[node.op.__class__]
right = self.visit(node.right)
left = self.visit(node.left)
rev = False
if isinstance(node.op, ast.Sub):
right = ast.Call(
func=ast.Name(id='Mul', ctx=ast.Load()),
args=[ast.UnaryOp(op=ast.USub(), operand=ast.Constant(1)), right],
keywords=[ast.keyword(arg='evaluate', value=ast.Constant(value=False))]
)
elif isinstance(node.op, ast.Div):
if isinstance(node.left, ast.UnaryOp):
left, right = right, left
rev = True
left = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[left, ast.UnaryOp(op=ast.USub(), operand=ast.Constant(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.Constant(value=False))]
)
else:
right = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Constant(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.Constant(value=False))]
)
if rev: # undo reversal
left, right = right, left
new_node = ast.Call(
func=ast.Name(id=sympy_class, ctx=ast.Load()),
args=[left, right],
keywords=[ast.keyword(arg='evaluate', value=ast.Constant(value=False))]
)
if sympy_class in ('Add', 'Mul'):
# Denest Add or Mul as appropriate
new_node.args = self.flatten(new_node.args, sympy_class)
return new_node
return node
def visit_Call(self, node):
if isinstance(node.func, ast.Name) and node.func.id in self.functions:
func = self.visit(node.func)
args = [self.visit(arg) for arg in node.args]
keywords = [ast.keyword(arg=keyword.arg, value=self.visit(keyword.value)) for keyword in node.keywords]
keywords.append(ast.keyword(arg='evaluate', value=ast.Constant(value=False)))
return ast.Call(func=func, args=args, keywords=keywords)
return self.generic_visit(node)
_transformation = { # items can be added but never re-ordered
0: lambda_notation,
1: auto_symbol,
2: repeated_decimals,
3: auto_number,
4: factorial_notation,
5: implicit_multiplication_application,
6: convert_xor,
7: implicit_application,
8: implicit_multiplication,
9: convert_equals_signs,
10: function_exponentiation,
11: rationalize}
transformations = '\n'.join('%s: %s' % (i, func_name(f)) for i, f in _transformation.items())
|
EvaluateFalseTransformer
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/venv/list.py
|
{
"start": 216,
"end": 820
}
|
class ____(BaseCommand):
"""List all virtualenvs associated with this project"""
arguments = (verbose_option,)
def handle(self, project: Project, options: argparse.Namespace) -> None:
project.core.ui.echo("Virtualenvs created with this project:\n")
for ident, venv in iter_venvs(project):
saved_python = project._saved_python
if saved_python and Path(saved_python).parent.parent == venv.root:
mark = "*"
else:
mark = "-"
project.core.ui.echo(f"{mark} [success]{ident}[/]: {venv.root}")
|
ListCommand
|
python
|
pytorch__pytorch
|
tools/test/heuristics/test_utils.py
|
{
"start": 341,
"end": 1574
}
|
class ____(unittest.TestCase):
def assertDictAlmostEqual(
self, first: dict[TestRun, Any], second: dict[TestRun, Any]
) -> None:
self.assertEqual(first.keys(), second.keys())
for key in first:
self.assertAlmostEqual(first[key], second[key])
def test_normalize_ratings(self) -> None:
ratings: dict[TestRun, float] = {
TestRun("test1"): 1,
TestRun("test2"): 2,
TestRun("test3"): 4,
}
normalized = utils.normalize_ratings(ratings, 4)
self.assertDictAlmostEqual(normalized, ratings)
normalized = utils.normalize_ratings(ratings, 0.1)
self.assertDictAlmostEqual(
normalized,
{
TestRun("test1"): 0.025,
TestRun("test2"): 0.05,
TestRun("test3"): 0.1,
},
)
normalized = utils.normalize_ratings(ratings, 0.2, min_value=0.1)
self.assertDictAlmostEqual(
normalized,
{
TestRun("test1"): 0.125,
TestRun("test2"): 0.15,
TestRun("test3"): 0.2,
},
)
if __name__ == "__main__":
unittest.main()
|
TestHeuristicsUtils
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/data_preprocessing/rescaling/minmax.py
|
{
"start": 425,
"end": 1703
}
|
class ____(Rescaling, AutoSklearnPreprocessingAlgorithm):
def __init__(
self, random_state: Optional[Union[int, np.random.RandomState]] = None
):
from sklearn.preprocessing import MinMaxScaler
self.preprocessor = MinMaxScaler(copy=False)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
"shortname": "MinMaxScaler",
"name": "MinMaxScaler",
"handles_missing_values": False,
"handles_nominal_values": False,
"handles_numerical_features": True,
"prefers_data_scaled": False,
"prefers_data_normalized": False,
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
# TODO find out if this is right!
"handles_sparse": False,
"handles_dense": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (INPUT, SIGNED_DATA),
"preferred_dtype": None,
}
|
MinMaxScalerComponent
|
python
|
google__pytype
|
pytype/tests/test_disables.py
|
{
"start": 72,
"end": 5761
}
|
class ____(test_base.BaseTest):
"""Test error disabling."""
def test_invalid_directive(self):
errors = self.CheckWithErrors("""
x = 1 # pytype: this is not a valid pytype directive. # invalid-directive
""")
# Invalid directives are just a warning, so has_error() should still
# return False.
self.assertFalse(errors.has_error())
def test_invalid_disable_error_name(self):
errors = self.CheckWithErrors("""
x = 1 # pytype: disable=not-an-error. # invalid-directive[e]
""")
self.assertErrorRegexes(errors, {"e": r"Invalid error name.*not-an-error"})
# Invalid directives are just a warning, so has_error() should still
# return False.
self.assertFalse(errors.has_error())
def test_disable_error(self):
self.InferWithErrors("""
x = a # name-error
x = b # pytype: disable=name-error
x = c # name-error
""")
def test_open_ended_directive(self):
"""Test that disables in the middle of the file can't be left open-ended."""
errors = self.CheckWithErrors("""
'''This is a docstring.
def f(x):
pass
class A:
pass
The above definitions should be ignored.
'''
# pytype: disable=attribute-error # ok (before first class/function def)
CONSTANT = 42
# pytype: disable=not-callable # ok (before first class/function def)
def f(x):
# type: ignore # late-directive[e1]
pass
def g(): pass
x = y # pytype: disable=name-error # ok (single line)
# pytype: disable=attribute-error # ok (re-enabled)
# pytype: disable=wrong-arg-types # late-directive[e2]
# pytype: enable=attribute-error
""")
self.assertErrorRegexes(
errors, {"e1": r"Type checking", "e2": r"wrong-arg-types"}
)
# late-directive is a warning
self.assertFalse(errors.has_error())
def test_skip_file(self):
self.Check("""
# pytype: skip-file
name_error
""")
def test_implicit_return(self):
"""Test that the return is attached to the last line of the function."""
# In python 3.10+ the bytecode line number for the RETURN None is at the
# enclosing control flow statement that falls through to the end. We adjust
# it before reporting the error. In 3.9- it is already set to the last line
# of the function.
self.Check("""
class A:
def f(self) -> str:
if __random__:
if __random__:
return "a" # pytype: disable=bad-return-type
def g() -> str:
pass # pytype: disable=bad-return-type
def h() -> str:
return ([1,
2,
3]) # pytype: disable=bad-return-type
""")
def test_implicit_return_empty_function(self):
# Check that we find the start of a function correctly and not the end of
# the previous one.
self.Check("""
def f():
pass
def j() -> str:
'''docstring''' # pytype: disable=bad-return-type
""")
def test_implicit_return_not_at_end(self):
self.Check("""
import logging
def f() -> str:
try:
return ''
except KeyError:
logging.exception( # pytype: disable=bad-return-type
'oops')
""")
def test_implicit_return_annotated_nested_function(self):
self.Check("""
import logging
def f():
def g() -> str:
try:
return ''
except:
logging.exception('oops') # pytype: disable=bad-return-type
return g
""")
def test_implicit_return_annotated_outer_function(self):
self.Check("""
def f() -> str:
def g():
pass
pass # pytype: disable=bad-return-type
""")
def test_silence_variable_mismatch(self):
self.Check("""
x = [
0,
] # type: None # pytype: disable=annotation-type-mismatch
""")
def test_disable_location(self):
self.Check("""
import re
re.sub(
'', object(), '') # pytype: disable=wrong-arg-types
""")
def test_skip_file_with_comment(self):
self.Check("""
# pytype: skip-file # extra comment here
import nonsense
""")
def test_missing_parameter_disable(self):
self.Check("""
class Foo:
def __iter__(self, x, y):
pass
def f(x):
pass
f(
x=[x for x in Foo], # pytype: disable=missing-parameter
)
""")
def test_silence_parameter_mismatch(self):
self.Check("""
def f(
x: int = 0.0,
y: str = '',
**kwargs,
): # pytype: disable=annotation-type-mismatch
pass
""")
def test_do_not_silence_parameter_mismatch(self):
self.CheckWithErrors("""
def f( # annotation-type-mismatch
x: int = 0.0,
y: str = '',
**kwargs,
):
pass # pytype: disable=annotation-type-mismatch
""")
def test_container_disable(self):
self.Check("""
x: list[int] = []
x.append(
''
) # pytype: disable=container-type-mismatch
""")
def test_multiple_directives(self):
"""We should support multiple directives on one line."""
self.Check("""
a = list() # type: list[int, str] # pytype: disable=invalid-annotation
b = list() # pytype: disable=invalid-annotation # type: list[int, str]
def foo(x): pass
c = foo(a, b.i) # pytype: disable=attribute-error # pytype: disable=wrong-arg-count
""")
def test_bare_annotation(self):
self.Check("""
from typing import AnyStr
def f():
x: AnyStr # pytype: disable=invalid-annotation
""")
|
DisableTest
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 4469,
"end": 4745
}
|
class ____(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = "generic_views/form.html"
context_object_name = "thingy"
def get_success_url(self):
return reverse("author_detail", args=[self.object.id])
|
SpecializedAuthorUpdate
|
python
|
Netflix__metaflow
|
metaflow/plugins/env_escape/communication/channel.py
|
{
"start": 45,
"end": 1650
}
|
class ____(object):
"""
Channel is a higher level abstraction over a low-level bytestream.
You can send and receive JSON serializable object directly with this interface
For now this class does not do much, but we could imagine some sort compression or other
transformation being added here
"""
def __init__(self, stream):
self._stream = stream
self._fmt = struct.Struct("<I")
def send(self, obj):
try:
to_send = json.dumps(obj, ensure_ascii=False, separators=(",", ":")).encode(
"utf-8"
)
sz = len(to_send)
self._stream.write(self._fmt.pack(sz))
self._stream.write(to_send)
except EOFError as e:
raise RuntimeError("Cannot send object over streaming interface: %s" % e)
except BaseException as e:
raise ValueError("Cannot serialize object: %s" % traceback.format_exc())
def recv(self, timeout=None):
# To receive, we first receive the size of the object and then the object itself
try:
sz_bytes = self._stream.read(self._fmt.size, timeout)
msg_sz = self._fmt.unpack(sz_bytes)[0]
obj_bytes = self._stream.read(msg_sz, timeout)
return json.loads(obj_bytes)
except EOFError as e:
raise RuntimeError("Cannot receive object over streaming interface: %s" % e)
except BaseException as e:
raise ValueError("Cannot deserialize object: %s" % traceback.format_exc())
def fileno(self):
return self._stream.fileno()
|
Channel
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/common.py
|
{
"start": 1400,
"end": 1578
}
|
class ____(enum.Enum):
"""Bulk Action to be taken if the entity already exists or not."""
FAIL = "fail"
SKIP = "skip"
OVERWRITE = "overwrite"
|
BulkActionOnExistence
|
python
|
encode__django-rest-framework
|
rest_framework/renderers.py
|
{
"start": 1809,
"end": 4092
}
|
class ____(BaseRenderer):
    """Render response data as a JSON bytestring."""

    media_type = 'application/json'
    format = 'json'
    encoder_class = encoders.JSONEncoder
    ensure_ascii = not api_settings.UNICODE_JSON
    compact = api_settings.COMPACT_JSON
    strict = api_settings.STRICT_JSON
    # JSON is a binary encoding that may be utf-8, utf-16 or utf-32, so no
    # charset is advertised.
    # See: https://www.ietf.org/rfc/rfc4627.txt
    # Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
    charset = None

    def get_indent(self, accepted_media_type, renderer_context):
        """Resolve the indent level to use, or None for compact output."""
        if accepted_media_type:
            # A media type such as 'application/json; indent=4' requests
            # pretty-printing; indent=0 is coerced back to None.
            _, params = parse_header_parameters(accepted_media_type)
            with contextlib.suppress(KeyError, ValueError, TypeError):
                requested = int(params['indent'])
                return zero_as_none(max(min(requested, 8), 0))
        # Otherwise honour an explicit 'indent' in the renderer context,
        # e.g. when invoked by the BrowsableAPIRenderer.
        return renderer_context.get('indent', None)

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """Serialize `data` into JSON, returning a bytestring."""
        if data is None:
            return b''

        context = renderer_context or {}
        indent = self.get_indent(accepted_media_type, context)
        if indent is not None:
            separators = INDENT_SEPARATORS
        elif self.compact:
            separators = SHORT_SEPARATORS
        else:
            separators = LONG_SEPARATORS

        rendered = json.dumps(
            data,
            cls=self.encoder_class,
            indent=indent,
            ensure_ascii=self.ensure_ascii,
            allow_nan=not self.strict,
            separators=separators,
        )
        # Fully escape \u2028 and \u2029 so the output is a strict
        # javascript subset.
        # See: https://gist.github.com/damncabbage/623b879af56f850a6ddc
        rendered = rendered.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
        return rendered.encode()
|
JSONRenderer
|
python
|
langchain-ai__langchain
|
libs/cli/langchain_cli/integration_template/tests/integration_tests/test_tools.py
|
{
"start": 148,
"end": 925
}
|
class ____(ToolsIntegrationTests):
    """Wire the tool under test into the shared integration-test suite."""

    @property
    def tool_constructor(self) -> Type[__ModuleName__Tool]:
        # The class (not an instance) that the suite will construct.
        return __ModuleName__Tool

    @property
    def tool_constructor_params(self) -> dict:
        # Keyword arguments forwarded to the constructor above.  A tool whose
        # __init__ took e.g. `some_arg: int` would return {"some_arg": 42}.
        return {}

    @property
    def tool_invoke_params_example(self) -> dict:
        """Example "args" payload for a single tool call.

        This must NOT be a full ToolCall dict — i.e. it should not carry the
        `{"name", "id", "args"}` keys.
        """
        return {"a": 2, "b": 3}
|
TestParrotMultiplyToolIntegration
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_write_tab_color.py
|
{
"start": 301,
"end": 838
}
|
class ____(unittest.TestCase):
    """Unit test for the Worksheet _write_tab_color() method."""

    def setUp(self):
        # Capture the worksheet's XML output in an in-memory buffer.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)

    def test_write_tab_color(self):
        """A named color should be emitted as its FF-prefixed RGB value."""
        self.worksheet.set_tab_color("red")
        self.worksheet._write_tab_color()

        expected = """<tabColor rgb="FFFF0000"/>"""
        actual = self.fh.getvalue()
        self.assertEqual(expected, actual)
|
TestWriteTabColor
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/organization_member/response.py
|
{
"start": 1622,
"end": 2140
}
|
class ____(TypedDict):
    """Serialized representation of an organization member."""
    # May be absent unless external-actor data was included in the response.
    externalUsers: NotRequired[list[ExternalActorResponse]]
    role: NotRequired[str]  # Deprecated: use orgRole
    roleName: NotRequired[str]  # Deprecated
    id: str
    email: str
    name: str
    # User may be optional b/c invites don't have users yet
    user: NotRequired[UserSerializerResponse]
    orgRole: str
    pending: bool
    expired: bool
    flags: _OrganizationMemberFlags
    dateCreated: datetime
    inviteStatus: str
    inviterName: str | None
|
OrganizationMemberResponse
|
python
|
ipython__ipython
|
tests/test_decorators.py
|
{
"start": 2657,
"end": 3663
}
|
class ____(object):
    """FooClass
    Example:
    >>> 1+1
    2
    """
    # NOTE: the docstrings below are doctest *fixtures*; several would fail
    # if executed (e.g. ``1/0`` -> ``bam!``).  @skip_doctest marks them so
    # the doctest collector skips them.  Do not "fix" these docstrings.
    @skip_doctest
    def __init__(self, x):
        """Make a FooClass.
        Example:
        >>> f = FooClass(3)
        junk
        """
        print("Making a FooClass.")
        self.x = x
    @skip_doctest
    def bar(self, y):
        """Example:
        >>> ff = FooClass(3)
        >>> ff.bar(0)
        boom!
        >>> 1/0
        bam!
        """
        return 1 / y
    def baz(self, y):
        """Example:
        >>> ff2 = FooClass(3)
        Making a FooClass.
        >>> ff2.baz(3)
        True
        """
        return self.x == y
def test_skip_dt_decorator2():
    """Doctest-skipping decorator should preserve function signature."""
    # The signature doctest_bad is known to have.
    expected = (["x", "y"], None, "k", (1,))
    # What introspection reports after decoration.
    actual = getargspec(doctest_bad)
    assert actual == expected, (
        "Incorrectly reconstructed args for doctest_bad: %s" % (actual,)
    )
|
FooClass
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/dml/test_bulk.py
|
{
"start": 34467,
"end": 36300
}
|
class ____(BulkTest, fixtures.DeclarativeMappedTest):
    """Regression test for SQLAlchemy issue #6793.

    Mixing bulk_save_objects() with ordinary session.add() in a single
    flush must emit the expected INSERT statements for both paths.
    """
    __sparse_driver_backend__ = True
    @classmethod
    def setup_classes(cls):
        # Minimal mapped class: identity PK plus a non-nullable name column.
        Base = cls.DeclarativeBasic
        class User(Base):
            __tablename__ = "users"
            id = Column(Integer, Identity(), primary_key=True)
            name = Column(String(255), nullable=False)
    def test_issue_6793(self):
        User = self.classes.User
        session = fixture_session()
        with self.sql_execution_asserter() as asserter:
            # Bulk path: rows A/B are inserted without RETURNING.
            session.bulk_save_objects([User(name="A"), User(name="B")])
            # Unit-of-work path: rows C/D go through the normal flush.
            session.add(User(name="C"))
            session.add(User(name="D"))
            session.flush()
        asserter.assert_(
            Conditional(
                # Dialects supporting executemany+RETURNING batch C/D into one
                # statement; others fall back to one INSERT per row.
                testing.db.dialect.insert_executemany_returning,
                [
                    CompiledSQL(
                        "INSERT INTO users (name) VALUES (:name)",
                        [{"name": "A"}, {"name": "B"}],
                    ),
                    CompiledSQL(
                        "INSERT INTO users (name) VALUES (:name) "
                        "RETURNING users.id",
                        [{"name": "C"}, {"name": "D"}],
                    ),
                ],
                [
                    CompiledSQL(
                        "INSERT INTO users (name) VALUES (:name)",
                        [{"name": "A"}, {"name": "B"}],
                    ),
                    CompiledSQL(
                        "INSERT INTO users (name) VALUES (:name)",
                        [{"name": "C"}],
                    ),
                    CompiledSQL(
                        "INSERT INTO users (name) VALUES (:name)",
                        [{"name": "D"}],
                    ),
                ],
            )
        )
|
BulkIssue6793Test
|
python
|
huggingface__transformers
|
src/transformers/models/dots1/modeling_dots1.py
|
{
"start": 9361,
"end": 13024
}
|
class ____(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(self, config: Dots1Config, layer_idx: int):
        super().__init__()
        # Per-layer attention flavor (e.g. "sliding_attention"), when the
        # config declares one; otherwise None.
        self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # How many query heads share each key/value head (GQA ratio).
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.rotary_fn = apply_rotary_pos_emb
        self.q_norm = Dots1RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # unlike olmo, only on the head dim!
        self.k_norm = Dots1RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # thus post q_norm does not need reshape
        self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run attention over `hidden_states`; returns (output, attn_weights)."""
        input_shape = hidden_states.shape[:-1]
        # Split the projection into per-head slices of size head_dim.
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Q/K are RMS-normalized per head before RoPE is applied.
        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Dispatch to the configured attention backend (eager by default).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,  # diff with Llama
            **kwargs,
        )
        # Merge heads back into the hidden dimension and project out.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
|
Dots1Attention
|
python
|
getsentry__sentry
|
src/sentry/snuba/entity_subscription.py
|
{
"start": 20662,
"end": 25557
}
|
class ____(BaseCrashRateMetricsEntitySubscription):
    """Crash-rate subscription backed by the raw-user sessions metric."""

    metric_key: SessionMRI = SessionMRI.RAW_USER

    def get_snql_aggregations(self) -> list[str]:
        # Count distinct users overall, and the subset with crashed sessions.
        return ["uniq() as count", "uniqIf(session.status, crashed) as crashed"]
# Closed set of concrete subscription implementations that
# get_entity_subscription() below may return.
EntitySubscription = Union[
    EventsEntitySubscription,
    MetricsCountersEntitySubscription,
    MetricsSetsEntitySubscription,
    PerformanceTransactionsEntitySubscription,
    PerformanceMetricsEntitySubscription,
    PerformanceSpansEAPRpcEntitySubscription,
]
def get_entity_subscription(
    query_type: SnubaQuery.Type,
    dataset: Dataset,
    aggregate: str,
    time_window: int,
    extra_fields: _EntitySpecificParams | None = None,
) -> EntitySubscription:
    """Instantiate the `EntitySubscription` matching the query type/dataset.

    For crash-rate queries the aggregate is additionally validated while
    choosing between the counters- and sets-based implementations.
    Raises `UnsupportedQuerySubscription` when no implementation matches.
    """
    entity_subscription_cls: type[EntitySubscription] | None = None
    if query_type == SnubaQuery.Type.ERROR:
        entity_subscription_cls = EventsEntitySubscription
    elif query_type == SnubaQuery.Type.PERFORMANCE:
        if dataset == Dataset.Transactions:
            entity_subscription_cls = PerformanceTransactionsEntitySubscription
        elif dataset in (Dataset.Metrics, Dataset.PerformanceMetrics):
            entity_subscription_cls = PerformanceMetricsEntitySubscription
        elif dataset == Dataset.EventsAnalyticsPlatform:
            entity_subscription_cls = PerformanceSpansEAPRpcEntitySubscription
    elif query_type == SnubaQuery.Type.CRASH_RATE:
        entity_key = determine_crash_rate_alert_entity(aggregate)
        if entity_key == EntityKey.MetricsCounters:
            entity_subscription_cls = MetricsCountersEntitySubscription
        elif entity_key == EntityKey.MetricsSets:
            entity_subscription_cls = MetricsSetsEntitySubscription
    if entity_subscription_cls is None:
        raise UnsupportedQuerySubscription(
            f"Couldn't determine entity subscription for query type {query_type} with dataset {dataset}"
        )
    return entity_subscription_cls(aggregate, time_window, extra_fields)
def determine_crash_rate_alert_entity(aggregate: str) -> EntityKey:
    """Map a crash-free-percentage aggregate onto its metrics entity key."""
    parsed = re.match(CRASH_RATE_ALERT_AGGREGATE_RE, aggregate)
    if parsed is None:
        raise UnsupportedQuerySubscription(
            "Only crash free percentage queries are supported for crash rate alerts"
        )
    # Group 2 carries the counted column; "sessions" selects the counters
    # entity, anything else the sets entity.
    if parsed.group(2) == "sessions":
        return EntityKey.MetricsCounters
    return EntityKey.MetricsSets
def get_entity_key_from_request(request: Request) -> EntityKey:
    """Derive the EntityKey a Snuba request is addressed to."""
    match = request.query.match
    if isinstance(match, Join):
        # XXX: Is there a better way to handle this?  For joins, fall back
        # to the left-hand side of the first relationship.
        match = match.relationships[0].lhs
    return EntityKey(match.name)
def get_entity_from_query_builder(query_builder: BaseQueryBuilder) -> Entity | None:
    """Return an explicit Entity for Join queries; None otherwise."""
    match = query_builder.get_snql_query().query.match
    if not isinstance(match, Join):
        return None
    # Join queries must name their entity explicitly.
    lhs = match.relationships[0].lhs
    return Entity(name=lhs.name, alias=lhs.name)
def get_entity_key_from_query_builder(query_builder: BaseQueryBuilder) -> EntityKey:
    """Resolve the builder's SnQL request down to its EntityKey."""
    request = query_builder.get_snql_query()
    return get_entity_key_from_request(request)
def get_entity_subscription_from_snuba_query(
    snuba_query: SnubaQuery, organization_id: int
) -> EntitySubscription:
    """Build the EntitySubscription described by a stored SnubaQuery row."""
    extra = {
        "org_id": organization_id,
        "event_types": snuba_query.event_types,
        "extrapolation_mode": ExtrapolationMode(snuba_query.extrapolation_mode),
    }
    return get_entity_subscription(
        SnubaQuery.Type(snuba_query.type),
        Dataset(snuba_query.dataset),
        snuba_query.aggregate,
        snuba_query.time_window,
        extra_fields=extra,
    )
def get_entity_key_from_snuba_query(
    snuba_query: SnubaQuery,
    organization_id: int,
    project_id: int,
    skip_field_validation_for_entity_subscription_deletion: bool = False,
) -> EntityKey:
    """Resolve the EntityKey a stored SnubaQuery ultimately targets."""
    # The EAP dataset always maps to a single entity; short-circuit it.
    if Dataset(snuba_query.dataset) == Dataset.EventsAnalyticsPlatform:
        return EntityKey.EAPItems
    entity_subscription = get_entity_subscription_from_snuba_query(
        snuba_query,
        organization_id,
    )
    query_builder = entity_subscription.build_query_builder(
        snuba_query.query,
        [project_id],
        snuba_query.environment,
        {"organization_id": organization_id},
        skip_field_validation_for_entity_subscription_deletion=skip_field_validation_for_entity_subscription_deletion,
    )
    return get_entity_key_from_query_builder(query_builder)
|
MetricsSetsEntitySubscription
|
python
|
python-openxml__python-docx
|
src/docx/shared.py
|
{
"start": 2443,
"end": 2670
}
|
class ____(Length):
    """Construct a Length from a value in millimeters, e.g. ``width = Mm(240.5)``."""
    def __new__(cls, mm: float):
        # Convert to EMU, the canonical unit Length stores internally.
        return Length.__new__(cls, int(mm * Length._EMUS_PER_MM))
|
Mm
|
python
|
doocs__leetcode
|
solution/0500-0599/0575.Distribute Candies/Solution.py
|
{
"start": 0,
"end": 139
}
|
class ____:
    def distributeCandies(self, candyType: List[int]) -> int:
        # At most half the candies can be picked, and at most one of each
        # distinct type is useful — the answer is the smaller of the two.
        half = len(candyType) // 2
        return min(half, len(set(candyType)))
|
Solution
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/glue_crawler.py
|
{
"start": 1449,
"end": 5310
}
|
class ____(AwsBaseOperator[GlueCrawlerHook]):
    """
    Creates, updates and triggers an AWS Glue Crawler.
    AWS Glue Crawler is a serverless service that manages a catalog of
    metadata tables that contain the inferred schema, format and data
    types of data stores within the AWS cloud.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GlueCrawlerOperator`
    :param config: Configurations for the AWS Glue crawler
    :param poll_interval: Time (in seconds) to wait between two consecutive calls to check crawler status
    :param wait_for_completion: Whether to wait for crawl execution completion. (default: True)
    :param deferrable: If True, the operator will wait asynchronously for the crawl to complete.
        This implies waiting for completion. This mode requires aiobotocore module to be installed.
        (default: False)
    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is ``None`` or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :param verify: Whether or not to verify SSL certificates. See:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
    :param botocore_config: Configuration dictionary (key-values) for botocore client. See:
        https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
    """
    aws_hook_class = GlueCrawlerHook
    template_fields: Sequence[str] = aws_template_fields(
        "config",
    )
    ui_color = "#ededed"
    def __init__(
        self,
        config,
        poll_interval: int = 5,
        wait_for_completion: bool = True,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.poll_interval = poll_interval
        self.wait_for_completion = wait_for_completion
        self.deferrable = deferrable
        self.config = config
    def execute(self, context: Context) -> str:
        """
        Execute AWS Glue Crawler from Airflow.
        :return: the name of the current glue crawler.
        """
        crawler_name = self.config["Name"]
        # Upsert: update an existing crawler in place, otherwise create it.
        if self.hook.has_crawler(crawler_name):
            self.hook.update_crawler(**self.config)
        else:
            self.hook.create_crawler(**self.config)
        self.log.info("Triggering AWS Glue Crawler")
        self.hook.start_crawler(crawler_name)
        if self.deferrable:
            # Hand the wait off to the triggerer; execute_complete resumes us.
            self.defer(
                trigger=GlueCrawlerCompleteTrigger(
                    crawler_name=crawler_name,
                    waiter_delay=self.poll_interval,
                    aws_conn_id=self.aws_conn_id,
                    region_name=self.region_name,
                    verify=self.verify,
                    botocore_config=self.botocore_config,
                ),
                method_name="execute_complete",
            )
        elif self.wait_for_completion:
            # Synchronous wait, polling every poll_interval seconds.
            self.log.info("Waiting for AWS Glue Crawler")
            self.hook.wait_for_crawler_completion(crawler_name=crawler_name, poll_interval=self.poll_interval)
        return crawler_name
    def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
        """Resume after deferral; raise unless the trigger reported success."""
        validated_event = validate_execute_complete_event(event)
        if validated_event["status"] != "success":
            raise AirflowException(f"Error in glue crawl: {validated_event}")
        return self.config["Name"]
|
GlueCrawlerOperator
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_nn.py
|
{
"start": 165292,
"end": 172865
}
|
class ____(InputVariableMixin, TestBase):  # type: ignore[misc]
    """Test harness for criterion (loss) modules: checks forward output
    against a reference function, runs gradcheck/gradgradcheck, and
    compares CPU vs. CUDA results across dtypes."""
    # TODO: check that criterions don't ignore grad_output
    _required_arg_names = TestBase._required_arg_names.union({'target'})
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # All knobs arrive via kwargs with permissive defaults.
        self.should_test_cuda = kwargs.get('test_cuda', True)
        self.check_forward_only = kwargs.get('check_forward_only', False)
        self.check_gradgrad = kwargs.get('check_gradgrad', True)
        self.check_half = kwargs.get('check_half', True)
        self.check_bfloat16 = kwargs.get('check_bfloat16', False)
        self.check_complex = kwargs.get('check_complex', False)
        self.test_cpu = kwargs.get('test_cpu', True)
        self.with_tf32 = kwargs.get('with_tf32', True)
        self.tf32_precision = kwargs.get('tf32_precision', 0.001)
        self.check_batched_grad = kwargs.get('check_batched_grad', True)
        self.default_dtype = kwargs.get('default_dtype')
        if self.default_dtype is None:
            self.default_dtype = torch.get_default_dtype()
    def __call__(self, test_case):
        # Runs the forward/reference comparison and gradient checks.
        with set_default_dtype(self.default_dtype):
            module = self.constructor(*self.constructor_args)
            input = self._get_input()
            # Check that these methods don't raise errors
            module.__repr__()
            str(module)
            target = self._get_target()
            if self.reference_fn is not None:
                out = test_case._forward_criterion(module, input, target, extra_args=self.extra_args)
                # deepcopy so the reference fn can't mutate the real inputs.
                ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,)
                expected_out = self.reference_fn(*ref_args)
                test_case.assertEqual(out, expected_out)
            if self.check_forward_only:
                return
            params = tuple(x for x in module.parameters())
            # gradcheck needs a flat tuple of differentiable inputs; the
            # wrapper shape depends on whether `input` is itself a tuple.
            if not isinstance(input, tuple):
                inputs = (input,) + params + (target,)
                def apply_fn(input, target, *params):
                    return module(input, target)
            else:
                inputs = input + params + (target,)
                def apply_fn(input1, input2, target, *params):  # type: ignore[misc]
                    return module(input1, input2, target)
            gradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad)
            if self.check_gradgrad:
                gradgradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad)
    def test_cuda(self, test_case, dtype, extra_args=None):
        # Compares CPU and GPU forward/backward results at the given dtype.
        def convert_dtype(obj, dtype, requires_grad=False):
            # Recursively converts tensors (and tuples of tensors) to dtype.
            if isinstance(obj, torch.Tensor):
                return obj.detach().to(dtype=dtype).requires_grad_(requires_grad)
            elif isinstance(obj, tuple):
                return tuple(convert_dtype(o, dtype, requires_grad) for o in obj)
            else:
                return obj
        if not TEST_CUDA or not self.should_test_cuda:
            raise unittest.SkipTest('Excluded from CUDA tests')
        with set_default_dtype(self.default_dtype):
            cpu_input = self._get_input()
            cpu_target = self._get_target()
            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args)
            # Convert input, target and module parameters to dtype
            cpu_input = convert_dtype(cpu_input, dtype, True)
            if cpu_target.is_floating_point() or cpu_target.is_complex():
                cpu_target = convert_dtype(cpu_target, dtype)
            cpu_module.type(dtype)
            gpu_module.type(dtype)
            # GPU setup
            gpu_input = to_gpu(cpu_input)
            gpu_target = to_gpu(cpu_target)
            gpu_module.cuda()
            # torch.HalfTensor doesn't support most operations, converting back to default
            if dtype in {torch.half, torch.bfloat16}:
                cpu_input = self._get_input()
                cpu_target = self._get_target()
                # Loss modules with weights require consistent input/module weight types
                cpu_module = self.constructor(*self.constructor_args)
            cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args)
            gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args)
            # dtype used to be able to be None, so set precision in this way instead of a precision map
            test_case.assertEqual(cpu_output, gpu_output,
                                  atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False)
            cpu_gradInput = test_case._backward_criterion(
                cpu_module, cpu_input, cpu_output, cpu_target, extra_args=extra_args)
            gpu_gradInput = test_case._backward_criterion(
                gpu_module, gpu_input, gpu_output, gpu_target, extra_args=extra_args)
            # dtype used to be able to be None, so set precision in this way instead of a precision map
            test_case.assertEqual(cpu_gradInput, gpu_gradInput,
                                  atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False)
    def _get_target(self):
        return self._get_arg('target', False)
    @property
    def constructor_args(self):
        return self._get_arg('constructor_args', False)
    @property
    def extra_args(self):
        return self._get_arg('extra_args', False)
def _test_bfloat16_ops(test_case, op, device, inp_dims=(), prec=1e-2, scale_factor=None):
    """Check that an op's bfloat16 forward/backward agrees with fp32
    within `prec`, optionally scaling the random input by `scale_factor`."""
    # fp32 compute
    input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True)
    if scale_factor is not None:
        input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_()
    out1 = op(input1)
    grad_input1 = torch.randn_like(out1, device=device)
    out1.backward(grad_input1)
    # bfloat16 compute
    op_bfp16 = op.bfloat16()
    input2 = input1.detach().bfloat16().requires_grad_()
    grad_input2 = grad_input1.bfloat16()
    out2 = op_bfp16(input2)
    out2.backward(grad_input2)
    # Compare outputs and input gradients across precisions.
    test_case.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)
    test_case.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)
def _test_module_empty_input(test_case, module, inp, check_size=True, inference=False):
    """Run `module` on (typically empty) input `inp`; in training mode also
    backprop and assert that all gradients come out as zeros."""
    if not inference:
        inp.requires_grad_(True)
    out = module(inp)
    if not inference:
        gO = torch.rand_like(out)
        out.backward(gO)
    if check_size:
        test_case.assertEqual(out.size(), inp.size())
    if not inference:
        # Empty input must contribute nothing: every grad is exactly zero.
        for p in module.parameters():
            if p.requires_grad:
                test_case.assertEqual(p.grad, torch.zeros_like(p.grad))
        test_case.assertEqual(inp.grad, torch.zeros_like(inp))
def _create_basic_net():
    """Return a (layer, net, sequential) triple of small modules carrying
    both parameters and buffers, for tests needing a nontrivial module tree."""
    class Layer(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.layer_dummy_param = nn.Parameter(torch.empty(3, 5))
            self.layer_dummy_buf = nn.Buffer(torch.zeros(1, 3, 3, 7))

    class Net(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.l1 = Layer()
            self.dummy_param = nn.Parameter(torch.empty(3, 5))
            self.dummy_buf = nn.Buffer(torch.zeros(7, 3, 3, 1))

    layer = Layer()
    net = Net()
    # Deliberately reuses the same Net instance twice.
    seq = nn.Sequential(net, net)
    return layer, net, seq
|
CriterionTest
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/ndb/async/shopping_cart_test.py
|
{
"start": 3419,
"end": 4235
}
|
class ____(ndb.Model):
    """Datastore entity holding a single integer counter value."""
    value = ndb.IntegerProperty()
def test_update_counter_async(testbed):
    """The async updater must not commit until its future is resolved."""
    key = Counter(value=1).put()
    update_counter = shopping_cart.define_update_counter_async()
    pending = update_counter(key)
    # Nothing is persisted until the future is forced.
    assert key.get().value == 1
    assert pending.get_result() == 2
    assert key.get().value == 2
def test_update_counter_tasklet(testbed):
    """The tasklet-based updater also defers its write until get_result()."""
    key = Counter(value=1).put()
    update_counter = shopping_cart.define_update_counter_tasklet()
    pending = update_counter(key)
    # Still the original value before the tasklet is driven to completion.
    assert key.get().value == 1
    pending.get_result()
    assert key.get().value == 2
def test_get_first_ready(testbed):
    """get_first_ready() should fetch content that looks like an HTML page."""
    testbed.init_urlfetch_stub()
    page = shopping_cart.get_first_ready()
    assert "html" in page.lower()
|
Counter
|
python
|
doocs__leetcode
|
solution/0300-0399/0375.Guess Number Higher or Lower II/Solution.py
|
{
"start": 0,
"end": 372
}
|
class ____:
    def getMoneyAmount(self, n: int) -> int:
        # dp[lo][hi]: least money that guarantees a win on the range [lo, hi].
        dp = [[0] * (n + 1) for _ in range(n + 1)]
        for lo in range(n - 1, 0, -1):
            for hi in range(lo + 1, n + 1):
                # Baseline: guess `hi` last, after settling [lo, hi - 1].
                dp[lo][hi] = hi + dp[lo][hi - 1]
                for guess in range(lo, hi):
                    # Pay `guess`, then face the worse of the two halves.
                    worst = max(dp[lo][guess - 1], dp[guess + 1][hi]) + guess
                    dp[lo][hi] = min(dp[lo][hi], worst)
        return dp[1][n]
|
Solution
|
python
|
django__django
|
django/db/backends/ddl_references.py
|
{
"start": 5815,
"end": 7355
}
|
class ____(Reference):
    """
    Statement template and formatting parameters container.

    Keeps a reference to a statement without interpolating identifiers that
    might still have to be adjusted if a referenced table or column is
    removed.
    """
    def __init__(self, template, **parts):
        self.template = template
        self.parts = parts

    def _any_part(self, method_name, *args):
        # True if any part implements `method_name` and reports a reference.
        return any(
            getattr(part, method_name)(*args)
            for part in self.parts.values()
            if hasattr(part, method_name)
        )

    def references_table(self, table):
        return self._any_part("references_table", table)

    def references_column(self, table, column):
        return self._any_part("references_column", table, column)

    def references_index(self, table, index):
        return self._any_part("references_index", table, index)

    def rename_table_references(self, old_table, new_table):
        for part in self.parts.values():
            rename = getattr(part, "rename_table_references", None)
            if rename is not None:
                rename(old_table, new_table)

    def rename_column_references(self, table, old_column, new_column):
        for part in self.parts.values():
            rename = getattr(part, "rename_column_references", None)
            if rename is not None:
                rename(table, old_column, new_column)

    def __str__(self):
        return self.template % self.parts
|
Statement
|
python
|
ray-project__ray
|
python/ray/client_builder.py
|
{
"start": 11138,
"end": 14589
}
|
class ____(ClientBuilder):
    def connect(self) -> ClientContext:
        """Connect to the address passed to ray.client(...) via a local ray.init."""
        if self._deprecation_warn_enabled:
            self._client_deprecation_warn()
        # Fill runtime env/namespace from the environment if not already set.
        # Must run *after* the deprecation warning, which inspects whether
        # those values are already set.
        self._fill_defaults_from_env()
        connection_dict = ray.init(address=self.address, job_config=self._job_config)
        py_version = "{}.{}.{}".format(
            sys.version_info[0], sys.version_info[1], sys.version_info[2]
        )
        return ClientContext(
            dashboard_url=connection_dict["webui_url"],
            python_version=py_version,
            ray_version=ray.__version__,
            ray_commit=ray.__commit__,
            _num_clients=1,
            _context_to_restore=None,
        )
def _split_address(address: str) -> Tuple[str, str]:
    """Split an address into (module/scheme, inner_address).

    Addresses without an explicit scheme get "ray://" prepended first.
    """
    if "://" not in address:
        address = f"ray://{address}"
    return split_address(address)
def _get_builder_from_address(address: Optional[str]) -> ClientBuilder:
    """Map an address string onto the ClientBuilder that should handle it."""
    if address == "local":
        return _LocalClientBuilder("local")
    if address is None:
        # NOTE: This is not placed in `Node::get_temp_dir_path`, because
        # this file is accessed before the `Node` object is created.
        address = ray._private.services.canonicalize_bootstrap_address(address)
        return _LocalClientBuilder(address)
    module_string, inner_address = _split_address(address)
    try:
        module = importlib.import_module(module_string)
    except Exception as e:
        raise RuntimeError(
            f"Module: {module_string} does not exist.\n"
            f"This module was parsed from Address: {address}"
        ) from e
    has_builder = "ClientBuilder" in dir(module)
    assert has_builder, f"Module: {module_string} does not have ClientBuilder."
    return module.ClientBuilder(inner_address)
@Deprecated
def client(
    address: Optional[str] = None, _deprecation_warn_enabled: bool = True
) -> ClientBuilder:
    """
    Creates a ClientBuilder based on the provided address. The address can be
    of the following forms:
    * None: Connects to or creates a local cluster and connects to it.
    * ``"local"``: Creates a new cluster locally and connects to it.
    * ``"IP:Port"``: Connects to a Ray Client Server at the given address.
    * ``"module://inner_address"``: load module.ClientBuilder & pass
      inner_address
    The _deprecation_warn_enabled flag enables deprecation warnings, and is
    for internal use only. Set it to False to suppress client deprecation
    warnings.
    """
    # An explicit `address` argument always wins; the env var only fills in
    # when no address was passed.
    env_address = os.environ.get(RAY_ADDRESS_ENVIRONMENT_VARIABLE)
    if env_address and address is None:
        logger.debug(
            f"Using address ({env_address}) instead of auto-detection "
            f"because {RAY_ADDRESS_ENVIRONMENT_VARIABLE} is set."
        )
        address = env_address
    builder = _get_builder_from_address(address)
    # Disable client deprecation warn when ray.client is used internally
    builder._deprecation_warn_enabled = _deprecation_warn_enabled
    return builder
|
_LocalClientBuilder
|
python
|
django__django
|
tests/admin_inlines/models.py
|
{
"start": 10050,
"end": 10259
}
|
class ____(models.Model):
    """Model with a UUID primary key and a foreign key to UUIDParent."""
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    title = models.CharField(max_length=128)
    parent = models.ForeignKey(UUIDParent, on_delete=models.CASCADE)
|
UUIDChild
|
python
|
astropy__astropy
|
astropy/io/fits/diff.py
|
{
"start": 45994,
"end": 60021
}
|
class ____(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
appear only in table a. The second element is a list of tables that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
[('TARGET', 0), ('NGC1001', 'NGC1002')]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
# Even if the number of columns are unequal, we still do comparison of
# any common columns
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if "*" in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
for fieldname in ignore_fields:
fieldname = fieldname.lower()
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(
colsa_set.intersection(colsb_set), key=operator.attrgetter("name")
)
self.common_column_names = {col.name.lower() for col in self.common_columns}
left_only_columns = {
col.name.lower(): col for col in colsa_set.difference(colsb_set)
}
right_only_columns = {
col.name.lower(): col for col in colsb_set.difference(colsa_set)
}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
# accessed in FITS tables, a case-sensitive is tried first, and failing
# that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb))
)
arra = self.a[col.name]
arrb = self.b[col.name]
if np.issubdtype(arra.dtype, np.floating) and np.issubdtype(
arrb.dtype, np.floating
):
diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol)
elif "P" in col.format or "Q" in col.format:
diffs = (
[
idx
for idx in range(len(arra))
if not np.allclose(
arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol
)
],
)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
# Skip duplicate indices, which my occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
def _report(self):
if self.diff_column_count:
self._writeln(" Tables have different number of columns:")
self._writeln(f" a: {self.diff_column_count[0]}")
self._writeln(f" b: {self.diff_column_count[1]}")
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in a")
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in b")
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(f" Column {name} has different {col_attrs[attr]}:")
report_diff_values(
vals[0],
vals[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_rows:
self._writeln(" Table rows differ:")
self._writeln(f" a: {self.diff_rows[0]}")
self._writeln(f" b: {self.diff_rows[1]}")
self._writeln(" No further data comparison performed.")
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for (col, row), values in self.diff_values:
self._writeln(f" Column {col} data differs in row {row}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(
f" ...{self.diff_total - self.numdiffs} additional difference(s) found."
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
f" {self.diff_total} different table data element(s) found "
f"({self.diff_ratio:.2%} different)."
)
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ""
else:
dup = f"[{idx + 1}]"
fileobj.write(
textwrap.indent(
f" Keyword {keyword:8}{dup} has different {attr}:\n", ind * " "
)
)
report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
|
TableDataDiff
|
python
|
getsentry__sentry
|
tests/sentry/sentry_apps/api/endpoints/test_sentry_apps_stats.py
|
{
"start": 292,
"end": 3063
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-sentry-apps-stats"
method = "get"
def setUp(self) -> None:
self.superuser = self.create_user(is_superuser=True)
self.org_two = self.create_organization()
self.app_one = self.create_sentry_app(
name="Test", organization=self.org_two, published=True
)
self.app_one_avatar = self.create_sentry_app_avatar(
sentry_app=self.app_one, color=True, avatar_type=0
)
self.app_two = self.create_sentry_app(name="Testin", organization=self.organization)
self.create_sentry_app_installation(slug=self.app_one.slug, organization=self.organization)
self.create_sentry_app_installation(slug=self.app_two.slug, organization=self.organization)
def _check_response(self, response: Response) -> None:
assert {
"id": self.app_two.id,
"uuid": self.app_two.uuid,
"slug": self.app_two.slug,
"name": self.app_two.name,
"installs": 1,
"avatars": [],
} in orjson.loads(response.content)
assert {
"id": self.app_one.id,
"uuid": self.app_one.uuid,
"slug": self.app_one.slug,
"name": self.app_one.name,
"installs": 1,
"avatars": [serialize(self.app_one_avatar)],
} in orjson.loads(response.content)
def test_superuser_has_access(self) -> None:
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(status_code=200)
self._check_response(response)
@override_options({"staff.ga-rollout": True})
def test_staff_has_access(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
response = self.get_success_response(status_code=200)
self._check_response(response)
def test_nonsuperusers_have_no_access(self) -> None:
self.login_as(user=self.user)
self.get_error_response(status_code=403)
def test_per_page(self) -> None:
self.login_as(user=self.superuser, superuser=True)
self.create_sentry_app_installation(
slug=self.app_one.slug, organization=self.create_organization()
)
for i in range(3):
app = self.create_sentry_app(
name=f"Test {i}", organization=self.org_two, published=True
)
self.create_sentry_app_installation(slug=app.slug, organization=self.organization)
response = self.get_success_response(per_page=2, status_code=200)
assert len(response.data) == 2 # honors per_page
assert response.data[0]["installs"] == 2 # sorted by installs
|
SentryAppsStatsTest
|
python
|
huggingface__transformers
|
src/transformers/models/electra/configuration_electra.py
|
{
"start": 867,
"end": 7711
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ElectraModel`]. It is
used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
[google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ElectraModel`].
embedding_size (`int`, *optional*, defaults to 128):
Dimensionality of the encoder layers and the pooler layer.
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
summary_type (`str`, *optional*, defaults to `"first"`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation.
summary_last_dropout (`float`, *optional*, defaults to 0.0):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ElectraConfig, ElectraModel
>>> # Initializing a ELECTRA electra-base-uncased style configuration
>>> configuration = ElectraConfig()
>>> # Initializing a model (with random weights) from the electra-base-uncased style configuration
>>> model = ElectraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "electra"
def __init__(
self,
vocab_size=30522,
embedding_size=128,
hidden_size=256,
num_hidden_layers=12,
num_attention_heads=4,
intermediate_size=1024,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
summary_type="first",
summary_use_proj=True,
summary_activation="gelu",
summary_last_dropout=0.1,
pad_token_id=0,
use_cache=True,
classifier_dropout=None,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_last_dropout = summary_last_dropout
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
__all__ = ["ElectraConfig"]
|
ElectraConfig
|
python
|
ray-project__ray
|
python/ray/data/random_access_dataset.py
|
{
"start": 9766,
"end": 10209
}
|
class ____:
def __init__(self, arrow_col):
self.arrow_col = arrow_col
def __getitem__(self, i):
return self.arrow_col[i].as_py()
def __len__(self):
return len(self.arrow_col)
def _get_bounds(block, key):
if len(block) == 0:
return None
b = (block[key][0], block[key][len(block) - 1])
if isinstance(block, pa.Table):
b = (b[0].as_py(), b[1].as_py())
return b
|
_ArrowListWrapper
|
python
|
scikit-learn__scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
{
"start": 21880,
"end": 26093
}
|
class ____(_GeneralizedLinearRegressor):
"""Generalized Linear Model with a Gamma distribution.
This regressor uses the 'log' link function.
Read more in the :ref:`User Guide <Generalized_linear_models>`.
.. versionadded:: 0.23
Parameters
----------
alpha : float, default=1
Constant that multiplies the L2 penalty term and determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
Values of `alpha` must be in the range `[0.0, inf)`.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor `X @ coef_ + intercept_`.
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
Algorithm to use in the optimization problem:
'lbfgs'
Calls scipy's L-BFGS-B optimizer.
'newton-cholesky'
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
iterated reweighted least squares) with an inner Cholesky based solver.
This solver is a good choice for `n_samples` >> `n_features`, especially
with one-hot encoded categorical features with rare categories. Be aware
that the memory usage of this solver has a quadratic dependency on
`n_features` because it explicitly computes the Hessian matrix.
.. versionadded:: 1.2
max_iter : int, default=100
The maximal number of iterations for the solver.
Values must be in the range `[1, inf)`.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
Values must be in the range `(0.0, inf)`.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for `coef_` and `intercept_`.
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Values must be in the range `[0, inf)`.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_iter_ : int
Actual number of iterations used in the solver.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
PoissonRegressor : Generalized Linear Model with a Poisson distribution.
TweedieRegressor : Generalized Linear Model with a Tweedie distribution.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.GammaRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [19, 26, 33, 30]
>>> clf.fit(X, y)
GammaRegressor()
>>> clf.score(X, y)
np.float64(0.773)
>>> clf.coef_
array([0.073, 0.067])
>>> clf.intercept_
np.float64(2.896)
>>> clf.predict([[1, 0], [2, 8]])
array([19.483, 35.795])
"""
_parameter_constraints: dict = {
**_GeneralizedLinearRegressor._parameter_constraints
}
def __init__(
self,
*,
alpha=1.0,
fit_intercept=True,
solver="lbfgs",
max_iter=100,
tol=1e-4,
warm_start=False,
verbose=0,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
tol=tol,
warm_start=warm_start,
verbose=verbose,
)
def _get_loss(self):
return HalfGammaLoss()
|
GammaRegressor
|
python
|
pypa__warehouse
|
warehouse/subscriptions/interfaces.py
|
{
"start": 4436,
"end": 4494
}
|
class ____(IGenericBillingService):
pass
|
IBillingService
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.