| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/test_sympy_utils.py | {
"start": 34464,
"end": 34826
} | class ____(TestCase):
def test_typed_expr(self):
I = Identity(1)
typed_I = TypedExpr(I, torch.int32)
self.assertEqual(typed_I.expr, 1)
instantiate_parametrized_tests(TestValueRanges)
instantiate_parametrized_tests(TestSympyInterp)
instantiate_parametrized_tests(TestSympySolve)
if __name__ == "__main__":
run_tests()
| TestTypedExpr |
python | ipython__ipython | IPython/core/profiledir.py | {
"start": 899,
"end": 8459
} | class ____(LoggingConfigurable):
"""An object to manage the profile directory and its resources.
The profile directory is used by all IPython applications, to manage
configuration, logging and security.
This object knows how to find, create and manage these directories. This
should be used by any code that wants to handle profiles.
"""
security_dir_name = Unicode('security')
log_dir_name = Unicode('log')
startup_dir_name = Unicode('startup')
pid_dir_name = Unicode('pid')
static_dir_name = Unicode('static')
security_dir = Unicode(u'')
log_dir = Unicode(u'')
startup_dir = Unicode(u'')
pid_dir = Unicode(u'')
static_dir = Unicode(u'')
location = Unicode(u'',
help="""Set the profile location directly. This overrides the logic used by the
`profile` option.""",
).tag(config=True)
_location_isset = Bool(False) # flag for detecting multiply set location
@observe('location')
def _location_changed(self, change):
if self._location_isset:
raise RuntimeError("Cannot set profile location more than once.")
self._location_isset = True
new = change['new']
ensure_dir_exists(new)
# ensure config files exist:
self.security_dir = os.path.join(new, self.security_dir_name)
self.log_dir = os.path.join(new, self.log_dir_name)
self.startup_dir = os.path.join(new, self.startup_dir_name)
self.pid_dir = os.path.join(new, self.pid_dir_name)
self.static_dir = os.path.join(new, self.static_dir_name)
self.check_dirs()
def _mkdir(self, path: str, mode: Optional[int] = None) -> bool:
"""ensure a directory exists at a given path
This is a version of os.mkdir, with the following differences:
- returns whether the directory has been created or not.
- ignores EEXIST, protecting against race conditions where
the dir may have been created in between the check and
the creation
- sets permissions if requested and the dir already exists
Parameters
----------
path: str
path of the dir to create
mode: int
see `mode` of `os.mkdir`
Returns
-------
bool:
returns True if it created the directory, False otherwise
"""
if os.path.exists(path):
if mode and os.stat(path).st_mode != mode:
try:
os.chmod(path, mode)
except OSError:
self.log.warning(
"Could not set permissions on %s",
path
)
return False
try:
if mode:
os.mkdir(path, mode)
else:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
return False
else:
raise
return True
@observe('log_dir')
def check_log_dir(self, change=None):
self._mkdir(self.log_dir)
@observe('startup_dir')
def check_startup_dir(self, change=None):
if self._mkdir(self.startup_dir):
readme = os.path.join(self.startup_dir, "README")
src = os.path.join(
get_ipython_package_dir(), "core", "profile", "README_STARTUP"
)
if os.path.exists(src):
if not os.path.exists(readme):
shutil.copy(src, readme)
else:
self.log.warning(
"Could not copy README_STARTUP to startup dir. Source file %s does not exist.",
src,
)
@observe('security_dir')
def check_security_dir(self, change=None):
self._mkdir(self.security_dir, 0o40700)
@observe('pid_dir')
def check_pid_dir(self, change=None):
self._mkdir(self.pid_dir, 0o40700)
def check_dirs(self):
self.check_security_dir()
self.check_log_dir()
self.check_pid_dir()
self.check_startup_dir()
def copy_config_file(self, config_file: str, path: Path, overwrite=False) -> bool:
"""Copy a default config file into the active profile directory.
Default configuration files are kept in :mod:`IPython.core.profile`.
This function moves these from that location to the working profile
directory.
"""
dst = Path(os.path.join(self.location, config_file))
if dst.exists() and not overwrite:
return False
if path is None:
path = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'default')
assert isinstance(path, Path)
src = path / config_file
shutil.copy(src, dst)
return True
@classmethod
def create_profile_dir(cls, profile_dir, config=None):
"""Create a new profile directory given a full path.
Parameters
----------
profile_dir : str
The full path to the profile directory. If it does exist, it will
be used. If not, it will be created.
"""
return cls(location=profile_dir, config=config)
@classmethod
def create_profile_dir_by_name(cls, path, name=u'default', config=None):
"""Create a profile dir by profile name and path.
Parameters
----------
path : unicode
The path (directory) to put the profile directory in.
name : unicode
The name of the profile. The name of the profile directory will
be "profile_<profile>".
"""
if not os.path.isdir(path):
raise ProfileDirError('Directory not found: %s' % path)
profile_dir = os.path.join(path, u'profile_' + name)
return cls(location=profile_dir, config=config)
@classmethod
def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
"""Find an existing profile dir by profile name, return its ProfileDir.
This searches through a sequence of paths for a profile dir. If it
is not found, a :class:`ProfileDirError` exception will be raised.
The search path algorithm is:
1. ``os.getcwd()`` # removed for security reasons.
2. ``ipython_dir``
Parameters
----------
ipython_dir : unicode or str
The IPython directory to use.
name : unicode or str
The name of the profile. The name of the profile directory
will be "profile_<profile>".
"""
dirname = u'profile_' + name
paths = [ipython_dir]
for p in paths:
profile_dir = os.path.join(p, dirname)
if os.path.isdir(profile_dir):
return cls(location=profile_dir, config=config)
else:
raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
@classmethod
def find_profile_dir(cls, profile_dir, config=None):
"""Find/create a profile dir and return its ProfileDir.
This will create the profile directory if it doesn't exist.
Parameters
----------
profile_dir : unicode or str
The path of the profile directory.
"""
profile_dir = expand_path(profile_dir)
if not os.path.isdir(profile_dir):
raise ProfileDirError('Profile directory not found: %s' % profile_dir)
return cls(location=profile_dir, config=config)
| ProfileDir |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws_tests/ecs_tests/stubbed_ecs.py | {
"start": 2324,
"end": 2763
} | class ____:
def __init__(self):
self.tasks = defaultdict(list)
self.task_definitions = defaultdict(list)
self.tags = defaultdict(list)
self.account_settings = {}
self.default_account_settings = {"taskLongArnFormat": "enabled"}
self.register_task_definition_locks = defaultdict(threading.Lock)
# self.register_task_definition_locks["concurrent"].acquire(blocking=False)
| StubStorage |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-monsterapi/llama_index/llms/monsterapi/base.py | {
"start": 608,
"end": 5730
} | class ____(OpenAI):
model_info: dict = Field(
description="Model info field with pricing and other llm model information in json structure.",
default={},
)
"""MonsterAPI LLM.
Monster Deploy enables you to host any vLLM-supported large language model (LLM), such as TinyLlama, Mixtral, or Phi-2, as a REST API endpoint on MonsterAPI's cost-optimised GPU cloud.
With MonsterAPI's integration in LlamaIndex, you can use your deployed LLM API endpoints to create a RAG system or RAG bot for use cases such as:
- Answering questions on your documents
- Improving the content of your documents
- Finding context of importance in your documents
Once the deployment is live, use its base_url and api_auth_token as shown below.
Note: When using LlamaIndex to access Monster Deploy LLMs, you need to create a prompt with the required template and send the compiled prompt as input.
See the `LLama Index Prompt Template Usage example` section for more details.
See https://developer.monsterapi.ai/docs/monster-deploy-beta for more details.
Examples:
`pip install llama-index-llms-monsterapi`
1. MonsterAPI Private LLM Deployment use case
```python
from llama_index.llms.monsterapi import MonsterLLM
# Use the MonsterAPI Deploy service to launch a deployment,
# then get api_endpoint and api_auth_token and use them as api_base and api_key respectively.
llm = MonsterLLM(
model = "whatever is the basemodel used to deploy the llm",
api_base="https://ecc7deb6-26e0-419b-a7f2-0deb934af29a.monsterapi.ai",
api_key="a0f8a6ba-c32f-4407-af0c-169f1915490c",
temperature=0.75,
)
response = llm.complete("What is the capital of France?")
```
2. Monster API General Available LLMs
```python3
from llama_index.llms.monsterapi import MonsterLLM
llm = MonsterLLM(
model="microsoft/Phi-3-mini-4k-instruct"
)
response = llm.complete("What is the capital of France?")
print(str(response))
```
"""
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_base = get_from_param_or_env("api_base", api_base, "MONSTER_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "MONSTER_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self.model_info = self._fetch_model_details(api_base, api_key)
@classmethod
def class_name(cls) -> str:
return "MonsterAPI LLMs"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self._modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
is_function_calling_model=False,
)
@property
def _is_chat_model(self) -> bool:
return True
def _fetch_model_details(self, api_base: str, api_key: str):
headers = {"Authorization": f"Bearer {api_key}", "accept": "application/json"}
response = requests.get(f"{api_base}/models/info", headers=headers)
response.raise_for_status()
details = response.json()
return details["maximum_context_length"]
def _modelname_to_contextsize(self, model_name):
return self.model_info.get(model_name)
| MonsterLLM |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 1808,
"end": 1887
} | class ____(RawEvent, Event):
type: EventType = EventType.RAW
| RawWorkflowEvent |
python | encode__django-rest-framework | rest_framework/parsers.py | {
"start": 796,
"end": 907
} | class ____:
def __init__(self, data, files):
self.data = data
self.files = files
| DataAndFiles |
python | Netflix__metaflow | test/cmd/develop/test_stub_generator.py | {
"start": 220,
"end": 290
} | class ____:
"""Test class for stub generation"""
pass
| TestClass |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-airbyte-gong/llama_index/readers/airbyte_gong/base.py | {
"start": 126,
"end": 680
} | class ____(AirbyteCDKReader):
"""
AirbyteGongReader reader.
Retrieve documents from Gong
Args:
config: The config object for the gong source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_gong
super().__init__(
source_class=source_gong.SourceGong,
config=config,
record_handler=record_handler,
)
| AirbyteGongReader |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 957753,
"end": 958485
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for SavedReply."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("SavedReplyEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("SavedReply"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| SavedReplyConnection |
python | openai__gym | gym/error.py | {
"start": 3285,
"end": 3371
} | class ____(APIError):
"""Deprecated, to be removed at gym 1.0."""
| APIConnectionError |
python | PrefectHQ__prefect | tests/utilities/test_annotations.py | {
"start": 351,
"end": 2105
} | class ____:
@pytest.mark.parametrize(
"value",
[
"hello",
42,
3.14,
True,
None,
["a", 1, True],
{"some", "set"},
{
"string": "value",
"number": 42,
"list": [1, "two", 3.0],
"nested": {"a": [True, None]},
},
],
ids=["str", "int", "float", "bool", "none", "list", "set", "nested_dict"],
)
def test_round_trip(self, value: Any):
assert freeze(value).unfreeze() == value
@pytest.mark.parametrize(
"value",
[
httpx.AsyncClient(),
lambda: None,
type("foo", (object,), {}),
],
ids=["httpx_client", "lambda", "type"],
)
def test_non_json_serializable_raises(self, value: Any):
"""Test that freeze rejects non-JSON serializable types."""
with pytest.raises(ValueError, match="Value must be JSON serializable"):
freeze(value)
@pytest.mark.parametrize(
"value,expected_type",
[
("test", str),
(42, int),
(3.14, float),
(True, bool),
(None, type(None)),
],
ids=["str", "int", "float", "bool", "none"],
)
def test_frozen_parameters_are_serialized_as_json(
self, value: Any, expected_type: type
):
frozen = freeze(value)
# assert it works even if we don't parameterize the expected type
assert TypeAdapter(freeze).dump_python(frozen) == value
# assert it works if we do parameterize the expected type
assert TypeAdapter(freeze[expected_type]).dump_python(frozen) == value
| TestFreeze |
python | python__mypy | mypyc/ir/rtypes.py | {
"start": 34974,
"end": 37681
} | class ____(RType):
"""Fixed-length C array type (for example, int[5]).
Note that the implementation is a bit limited, and these can basically
be only used for local variables that are initialized in one location.
"""
def __init__(self, item_type: RType, length: int) -> None:
self.item_type = item_type
# Number of items
self.length = length
self.is_refcounted = False
def accept(self, visitor: RTypeVisitor[T]) -> T:
return visitor.visit_rarray(self)
@property
def may_be_immortal(self) -> bool:
return False
def __str__(self) -> str:
return f"{self.item_type}[{self.length}]"
def __repr__(self) -> str:
return f"<RArray {self.item_type!r}[{self.length}]>"
def __eq__(self, other: object) -> TypeGuard[RArray]:
return (
isinstance(other, RArray)
and self.item_type == other.item_type
and self.length == other.length
)
def __hash__(self) -> int:
return hash((self.item_type, self.length))
def serialize(self) -> JsonDict:
assert False
@classmethod
def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> RArray:
assert False
PyObject = RStruct(
name="PyObject",
names=["ob_refcnt", "ob_type"],
types=[c_pyssize_t_rprimitive, pointer_rprimitive],
)
PyVarObject = RStruct(
name="PyVarObject", names=["ob_base", "ob_size"], types=[PyObject, c_pyssize_t_rprimitive]
)
setentry = RStruct(
name="setentry", names=["key", "hash"], types=[pointer_rprimitive, c_pyssize_t_rprimitive]
)
smalltable = RStruct(name="smalltable", names=[], types=[setentry] * 8)
PySetObject = RStruct(
name="PySetObject",
names=[
"ob_base",
"fill",
"used",
"mask",
"table",
"hash",
"finger",
"smalltable",
"weakreflist",
],
types=[
PyObject,
c_pyssize_t_rprimitive,
c_pyssize_t_rprimitive,
c_pyssize_t_rprimitive,
pointer_rprimitive,
c_pyssize_t_rprimitive,
c_pyssize_t_rprimitive,
smalltable,
pointer_rprimitive,
],
)
PyListObject = RStruct(
name="PyListObject",
names=["ob_base", "ob_item", "allocated"],
types=[PyVarObject, pointer_rprimitive, c_pyssize_t_rprimitive],
)
def check_native_int_range(rtype: RPrimitive, n: int) -> bool:
"""Is n within the range of a native, fixed-width int type?
Assume the type is a fixed-width int type.
"""
if not rtype.is_signed:
return 0 <= n < (1 << (8 * rtype.size))
else:
limit = 1 << (rtype.size * 8 - 1)
return -limit <= n < limit
| RArray |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 8057,
"end": 8195
} | class ____(EllipticCurve):
name = "sect163k1"
key_size = 163
group_order = 0x4000000000000000000020108A2E0CC0D99F8A5EF
| SECT163K1 |
python | ethereum__web3.py | web3/geth.py | {
"start": 1642,
"end": 2729
} | class ____(Module):
"""
https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-admin
"""
is_async = False
add_peer: Method[Callable[[EnodeURI], bool]] = Method(
RPC.admin_addPeer,
mungers=[default_root_munger],
)
datadir: Method[Callable[[], str]] = Method(
RPC.admin_datadir,
is_property=True,
)
node_info: Method[Callable[[], NodeInfo]] = Method(
RPC.admin_nodeInfo,
is_property=True,
)
peers: Method[Callable[[], list[Peer]]] = Method(
RPC.admin_peers,
is_property=True,
)
start_http: Method[ServerConnection] = Method(
RPC.admin_startHTTP,
mungers=[admin_start_params_munger],
)
start_ws: Method[ServerConnection] = Method(
RPC.admin_startWS,
mungers=[admin_start_params_munger],
)
stop_http: Method[Callable[[], bool]] = Method(
RPC.admin_stopHTTP,
is_property=True,
)
stop_ws: Method[Callable[[], bool]] = Method(
RPC.admin_stopWS,
is_property=True,
)
| GethAdmin |
python | encode__django-rest-framework | rest_framework/renderers.py | {
"start": 37024,
"end": 39913
} | class ____:
def get_schema(self, instance):
CLASS_TO_TYPENAME = {
coreschema.Object: 'object',
coreschema.Array: 'array',
coreschema.Number: 'number',
coreschema.Integer: 'integer',
coreschema.String: 'string',
coreschema.Boolean: 'boolean',
}
schema = {}
if instance.__class__ in CLASS_TO_TYPENAME:
schema['type'] = CLASS_TO_TYPENAME[instance.__class__]
schema['title'] = instance.title
schema['description'] = instance.description
if hasattr(instance, 'enum'):
schema['enum'] = instance.enum
return schema
def get_parameters(self, link):
parameters = []
for field in link.fields:
if field.location not in ['path', 'query']:
continue
parameter = {
'name': field.name,
'in': field.location,
}
if field.required:
parameter['required'] = True
if field.description:
parameter['description'] = field.description
if field.schema:
parameter['schema'] = self.get_schema(field.schema)
parameters.append(parameter)
return parameters
def get_operation(self, link, name, tag):
operation_id = "%s_%s" % (tag, name) if tag else name
parameters = self.get_parameters(link)
operation = {
'operationId': operation_id,
}
if link.title:
operation['summary'] = link.title
if link.description:
operation['description'] = link.description
if parameters:
operation['parameters'] = parameters
if tag:
operation['tags'] = [tag]
return operation
def get_paths(self, document):
paths = {}
tag = None
for name, link in document.links.items():
path = parse.urlparse(link.url).path
method = link.action.lower()
paths.setdefault(path, {})
paths[path][method] = self.get_operation(link, name, tag=tag)
for tag, section in document.data.items():
for name, link in section.links.items():
path = parse.urlparse(link.url).path
method = link.action.lower()
paths.setdefault(path, {})
paths[path][method] = self.get_operation(link, name, tag=tag)
return paths
def get_structure(self, data):
return {
'openapi': '3.0.0',
'info': {
'version': '',
'title': data.title,
'description': data.description
},
'servers': [{
'url': data.url
}],
'paths': self.get_paths(data)
}
| _BaseOpenAPIRenderer |
python | google__jax | jax/_src/stages.py | {
"start": 36741,
"end": 38363
} | class ____(Exception):
pass
def _find_arg_mismatch(arg_list, fails, fun_name):
mismatched_args_msg = []
def mismatch(err):
for name, inp_da, aval in arg_list:
if err.m_type == MismatchType.ARG_SHARDING and err.da == inp_da:
mismatched_args_msg.append(
f"argument {name} of {fun_name} with shape {aval.str_short()} and "
f"{err._dev_ids_plat_str}")
break
first_err, second_err = fails
mismatch(first_err)
mismatch(second_err)
return mismatched_args_msg
def _device_assignment_mismatch_error(fun_name, fails, args_flat, api_name,
arg_names):
arg_list = []
if arg_names is None:
arg_names = [''] * len(args_flat)
for a, n in zip(args_flat, arg_names):
da = a.sharding._device_assignment if a.sharding is not None else None
arg_list.append((n, da, a.aval))
mismatched_args_msg = _find_arg_mismatch(arg_list, fails, fun_name)
if len(mismatched_args_msg) == 2:
first, second = mismatched_args_msg # pytype: disable=bad-unpacking
extra_msg = f" Got {first} and {second}"
elif len(mismatched_args_msg) == 1:
first, second = fails
# Choose the failure left which is not already covered by ARG_SHARDING.
left = second if first.m_type == MismatchType.ARG_SHARDING else first
extra_msg = f" Got {mismatched_args_msg[0]} and{left._str(api_name)}"
else:
first, second = fails
extra_msg = f" Got{first._str(api_name)} and{second._str(api_name)}"
msg = (f"Received incompatible devices for {api_name}ted computation.{extra_msg}")
return msg
| DeviceAssignmentMismatchError |
python | kamyu104__LeetCode-Solutions | Python/find-k-pairs-with-smallest-sums.py | {
"start": 1121,
"end": 1392
} | class ____(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
return nsmallest(k, product(nums1, nums2), key=sum)
| Solution2 |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_ascii.py | {
"start": 790,
"end": 2356
} | class ____(ColumnMapMetricProvider):
"""
Determines whether column values consist only of ASCII characters. If a value contains any non-ASCII character,
then that value will not pass.
"""
# This is the id string that will be used to reference your metric.
# Please see {some doc} for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.are_ascii"
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
def check_if_ascii(x):
return str(x).isascii()
column_ascii_check = column.apply(check_if_ascii)
return column_ascii_check
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
def is_ascii(val):
return str(val).isascii()
is_ascii_udf = F.udf(is_ascii, pyspark.types.BooleanType())
return is_ascii_udf(column)
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| ColumnValuesAreAscii |
python | kamyu104__LeetCode-Solutions | Python/count-stepping-numbers-in-range.py | {
"start": 34,
"end": 1078
} | class ____(object):
def countSteppingNumbers(self, low, high):
"""
:type low: str
:type high: str
:rtype: int
"""
MOD = 10**9+7
def f(s):
dp = [[0]*10 for _ in xrange(2)]
for j in xrange(1, ord(s[0])-ord('0')+1):
dp[0][j] = 1
prefix = True
for i in xrange(1, len(s)):
for j in xrange(10):
dp[i%2][j] = int(j != 0)
if j-1 >= 0:
dp[i%2][j] = (dp[i%2][j]+(dp[(i-1)%2][j-1]-int(prefix and (ord(s[i-1])-ord('0')) == j-1 and j > (ord(s[i])-ord('0')))))%MOD
if j+1 < 10:
dp[i%2][j] = (dp[i%2][j]+(dp[(i-1)%2][j+1]-int(prefix and (ord(s[i-1])-ord('0')) == j+1 and j > (ord(s[i])-ord('0')))))%MOD
if abs(ord(s[i])-ord(s[i-1])) != 1:
prefix = False
return reduce(lambda x, y: (x+y)%MOD, dp[(len(s)-1)%2])
return (f(high)-f(str(int(low)-1)))%MOD
| Solution |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 5687,
"end": 5993
} | class ____:
def __init__(self, name, cname, qualified_name, type, lineno):
self.name = name
self.cname = cname
self.qualified_name = qualified_name
self.type = type
self.lineno = int(lineno)
def __repr__(self):
return simple_repr(self)
| CythonVariable |
python | google__jax | jax/_src/test_util.py | {
"start": 25417,
"end": 25539
} | class ____:
def __len__(self): return 0
def __getitem__(self, i): raise IndexError(f"index {i} out of range.")
| ScalarShape |
python | optuna__optuna | optuna/storages/_grpc/auto_generated/api_pb2_grpc.py | {
"start": 19470,
"end": 35256
} | class ____(object):
"""*
Optuna storage service defines APIs to interact with the storage.
"""
@staticmethod
def CreateNewStudy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/CreateNewStudy',
api__pb2.CreateNewStudyRequest.SerializeToString,
api__pb2.CreateNewStudyReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def DeleteStudy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/DeleteStudy',
api__pb2.DeleteStudyRequest.SerializeToString,
api__pb2.DeleteStudyReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetStudyUserAttribute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetStudyUserAttribute',
api__pb2.SetStudyUserAttributeRequest.SerializeToString,
api__pb2.SetStudyUserAttributeReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetStudySystemAttribute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetStudySystemAttribute',
api__pb2.SetStudySystemAttributeRequest.SerializeToString,
api__pb2.SetStudySystemAttributeReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetStudyIdFromName(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetStudyIdFromName',
api__pb2.GetStudyIdFromNameRequest.SerializeToString,
api__pb2.GetStudyIdFromNameReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetStudyNameFromId(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetStudyNameFromId',
api__pb2.GetStudyNameFromIdRequest.SerializeToString,
api__pb2.GetStudyNameFromIdReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetStudyDirections(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetStudyDirections',
api__pb2.GetStudyDirectionsRequest.SerializeToString,
api__pb2.GetStudyDirectionsReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetStudyUserAttributes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetStudyUserAttributes',
api__pb2.GetStudyUserAttributesRequest.SerializeToString,
api__pb2.GetStudyUserAttributesReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetStudySystemAttributes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetStudySystemAttributes',
api__pb2.GetStudySystemAttributesRequest.SerializeToString,
api__pb2.GetStudySystemAttributesReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetAllStudies(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetAllStudies',
api__pb2.GetAllStudiesRequest.SerializeToString,
api__pb2.GetAllStudiesReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def CreateNewTrial(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/CreateNewTrial',
api__pb2.CreateNewTrialRequest.SerializeToString,
api__pb2.CreateNewTrialReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetTrialParameter(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetTrialParameter',
api__pb2.SetTrialParameterRequest.SerializeToString,
api__pb2.SetTrialParameterReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetTrialIdFromStudyIdTrialNumber(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetTrialIdFromStudyIdTrialNumber',
api__pb2.GetTrialIdFromStudyIdTrialNumberRequest.SerializeToString,
api__pb2.GetTrialIdFromStudyIdTrialNumberReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetTrialStateValues(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetTrialStateValues',
api__pb2.SetTrialStateValuesRequest.SerializeToString,
api__pb2.SetTrialStateValuesReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetTrialIntermediateValue(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetTrialIntermediateValue',
api__pb2.SetTrialIntermediateValueRequest.SerializeToString,
api__pb2.SetTrialIntermediateValueReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetTrialUserAttribute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetTrialUserAttribute',
api__pb2.SetTrialUserAttributeRequest.SerializeToString,
api__pb2.SetTrialUserAttributeReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetTrialSystemAttribute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/SetTrialSystemAttribute',
api__pb2.SetTrialSystemAttributeRequest.SerializeToString,
api__pb2.SetTrialSystemAttributeReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetTrial(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetTrial',
api__pb2.GetTrialRequest.SerializeToString,
api__pb2.GetTrialReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetTrials(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/optuna.StorageService/GetTrials',
api__pb2.GetTrialsRequest.SerializeToString,
api__pb2.GetTrialsReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
| StorageService |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-operations-to-make-x-and-y-equal.py | {
"start": 43,
"end": 546
} | class ____(object):
def minimumOperationsToMakeEqual(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
def memoization(x):
if y >= x:
return y-x
if x not in lookup:
lookup[x] = min(x-y, min(min(x%d, d-x%d)+memoization(x//d+int(d-x%d < x%d))+1 for d in (5, 11)))
return lookup[x]
lookup = {}
return memoization(x)
# Time: O(x)
# Space: O(x)
# bfs
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximum-points-inside-the-square.py | {
"start": 67,
"end": 604
} | class ____(object):
def maxPointsInsideSquare(self, points, s):
"""
:type points: List[List[int]]
:type s: str
:rtype: int
"""
INF = float("inf")
lookup = [INF for _ in xrange(26)]
d = INF
for c, (x, y) in itertools.izip(s, points):
k = ord(c)-ord('a')
mn2 = max(abs(x), abs(y))
if mn2 < lookup[k]:
mn2, lookup[k] = lookup[k], mn2
d = min(d, mn2)
return sum(mn1 < d for mn1 in lookup)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/version_utils.py | {
"start": 2447,
"end": 5138
} | class ____(object):
"""Chooses between Keras v1 and v2 TensorBoard callback class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
use_v2 = should_use_v2()
start_cls = cls
cls = swap_class(start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard,
use_v2)
if start_cls == callbacks_v1.TensorBoard and cls == callbacks.TensorBoard:
# Since the v2 class is not a subclass of the v1 class, __init__ has to
# be called manually.
return cls(*args, **kwargs)
return super(TensorBoardVersionSelector, cls).__new__(cls)
def should_use_v2():
"""Determine if v1 or v2 version should be used."""
if context.executing_eagerly():
return True
elif ops.executing_eagerly_outside_functions():
# Check for a v1 `wrap_function` FuncGraph.
# Code inside a `wrap_function` is treated like v1 code.
graph = ops.get_default_graph()
if (getattr(graph, "name", False) and
graph.name.startswith("wrapped_function")):
return False
return True
else:
return False
def swap_class(cls, v2_cls, v1_cls, use_v2):
"""Swaps in v2_cls or v1_cls depending on graph mode."""
if cls == object:
return cls
if cls in (v2_cls, v1_cls):
return v2_cls if use_v2 else v1_cls
# Recursively search superclasses to swap in the right Keras class.
new_bases = []
for base in cls.__bases__:
if ((use_v2 and issubclass(base, v1_cls)
# `v1_cls` often extends `v2_cls`, so it may still call `swap_class`
# even if it doesn't need to. That being said, it may be the safest
# not to over optimize this logic for the sake of correctness,
# especially if we swap v1 & v2 classes that don't extend each other,
# or when the inheritance order is different.
or (not use_v2 and issubclass(base, v2_cls)))):
new_base = swap_class(base, v2_cls, v1_cls, use_v2)
else:
new_base = base
new_bases.append(new_base)
cls.__bases__ = tuple(new_bases)
return cls
def disallow_legacy_graph(cls_name, method_name):
if not ops.executing_eagerly_outside_functions():
error_msg = (
"Calling `{cls_name}.{method_name}` in graph mode is not supported "
"when the `{cls_name}` instance was constructed with eager mode "
"enabled. Please construct your `{cls_name}` instance in graph mode or"
" call `{cls_name}.{method_name}` with eager mode enabled.")
error_msg = error_msg.format(cls_name=cls_name, method_name=method_name)
raise ValueError(error_msg)
def is_v1_layer_or_model(obj):
return isinstance(obj, (base_layer_v1.Layer, training_v1.Model))
| TensorBoardVersionSelector |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/serve.py | {
"start": 35839,
"end": 36952
} | class ____(CDNCacheControlMixin, CDNCacheTagsMixin, ServeDocsMixin, View):
"""
Serve static files from the same domain the docs are being served from.
This is basically a proxy for ``STATIC_URL``.
"""
project_cache_tag = "rtd-staticfiles"
# This view can always be cached,
# since these are static files used for all projects.
cache_response = True
def get(self, request, filename):
try:
return self._serve_static_file(request=request, filename=filename)
except InvalidPathError:
raise Http404
def _get_cache_tags(self):
"""
Add an additional *global* tag.
This is so we can purge all files from all projects
with one single call.
"""
tags = super()._get_cache_tags()
tags.append(self.project_cache_tag)
return tags
def _get_project(self):
# Method used by the CDNCacheTagsMixin class.
return self.request.unresolved_domain.project
def _get_version(self):
# This view isn't attached to a version.
return None
| ServeStaticFiles |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/transpiler_test.py | {
"start": 915,
"end": 1095
} | class ____(transformer.Base):
def visit_BinOp(self, node):
if isinstance(node.op, gast.Add):
node.op = gast.Sub()
return self.generic_visit(node)
| FlipSignTransformer |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 82446,
"end": 82720
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_PRETRAINING_MAPPING
AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining")
# Private on purpose, the public class will add the deprecation warnings.
| AutoModelForPreTraining |
python | kamyu104__LeetCode-Solutions | Python/sort-array-by-absolute-value.py | {
"start": 469,
"end": 675
} | class ____(object):
def sortByAbsoluteValue(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
nums.sort(key=lambda x: abs(x))
return nums
| Solution2 |
python | pytorch__pytorch | test/profiler/test_profiler.py | {
"start": 97254,
"end": 128715
} | class ____(TestCase):
def make_tree(self) -> list[MockNode]:
tree = {
"root_0": {
"1": {"2": {}},
"3": {
"4": {},
"5": {},
},
},
"root_1": {
"6": {},
"7": {},
"8": {
"9": {"10": {}},
},
},
}
return [MockNode(name, i) for name, i in tree.items()]
def test_dfs(self) -> None:
self.assertEqual(
" ".join(i.name for i in _utils.traverse_dfs(self.make_tree())),
"root_0 1 2 3 4 5 root_1 6 7 8 9 10",
)
def test_bfs(self) -> None:
self.assertEqual(
" ".join(i.name for i in _utils.traverse_bfs(self.make_tree())),
"root_0 root_1 1 3 6 7 8 2 4 5 9 10",
)
@staticmethod
def generate_mock_profile():
cuda_events = [
MockKinetoEvent("cudaLaunchKernel", 400, 100, 1, 0),
MockKinetoEvent("cudaLaunchKernel", 500, 100, 2, 0),
MockKinetoEvent("cudaLaunchKernel", 600, 100, 3, 0),
MockKinetoEvent("cudaLaunchKernel", 700, 100, 4, 0),
MockKinetoEvent("cudaLaunchKernel", 800, 100, 5, 0),
MockKinetoEvent("cudaLaunchKernel", 1500, 100, 6, 0),
MockKinetoEvent("GPU", 900, 100, 1, 1),
MockKinetoEvent("GPU", 1000, 100, 2, 1),
MockKinetoEvent("GPU", 1100, 100, 3, 1),
MockKinetoEvent("GPU", 1200, 100, 4, 1),
MockKinetoEvent("GPU", 1300, 100, 5, 1),
MockKinetoEvent("GPU", 1700, 100, 6, 1),
]
cpu_events = [
MockProfilerEvent("CPU (Before cudaLaunchKernel)", 1, 0, 100000),
MockProfilerEvent("CPU (Before cudaLaunchKernel)", 2, 100000, 100000),
MockProfilerEvent("CPU (Before cudaLaunchKernel)", 3, 200000, 100000),
MockProfilerEvent("CPU (Before cudaLaunchKernel)", 4, 300000, 100000),
MockProfilerEvent("CPU (After cudaLaunchKernel)", 5, 400000, 100000),
MockProfilerEvent("CPU (After cudaLaunchKernel)", 6, 500000, 100000),
MockProfilerEvent("CPU (After cudaLaunchKernel)", 7, 600000, 100000),
MockProfilerEvent("CPU (After cudaLaunchKernel)", 8, 700000, 100000),
MockProfilerEvent("CPU (After GPU)", 9, 800000, 100000),
MockProfilerEvent("CPU (After GPU)", 10, 900000, 100000),
MockProfilerEvent("CPU (After GPU)", 11, 1100000, 100000),
MockProfilerEvent("CPU (After GPU)", 12, 1200000, 500000),
]
profiler = unittest.mock.Mock()
profiler.kineto_results = unittest.mock.Mock()
profiler.kineto_results.events = unittest.mock.Mock(return_value=cuda_events)
profiler.kineto_results.experimental_event_tree = unittest.mock.Mock(
return_value=cpu_events
)
return profiler
@staticmethod
def load_mock_profile():
accept = expecttest.ACCEPT
json_file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"profiler_utils_mock_events.json",
)
if accept and torch.cuda.is_available():
def garbage_code(x):
for i in range(5):
x[0, i] = i
x = torch.ones((4096, 4096), device="cuda")
x = x @ x
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
record_shapes=True,
with_stack=True,
) as prof:
for _ in range(5):
x = x @ x
garbage_code(x)
for _ in range(5):
x = x @ x
kineto_events = [
{
"_name": e.name,
"_start_ns": e.start_ns(),
"_duration_ns": e.duration_ns(),
"_linked_correlation_id": e.linked_correlation_id(),
"_device_type": 1 if e.device_type() == DeviceType.CUDA else 0,
}
for e in prof.profiler.kineto_results.events()
]
def EventTreeDFS(event_tree):
from collections import deque
stack = deque(event_tree)
while stack:
curr_event = stack.pop()
yield curr_event
for child_event in curr_event.children:
stack.append(child_event)
profiler_events = [
{
"_name": e.name,
"id": e.id,
"start_time_ns": e.start_time_ns,
"duration_time_ns": e.duration_time_ns,
"correlation_id": e.correlation_id,
"children": [child.id for child in e.children],
"parent": e.parent.id if e.parent else None,
}
for e in EventTreeDFS(
prof.profiler.kineto_results.experimental_event_tree()
)
]
with open(json_file_path, "w") as f:
json.dump([kineto_events, profiler_events], f)
assert os.path.exists(json_file_path)
with open(json_file_path) as f:
kineto_events, profiler_events = json.load(f)
cuda_events = [MockKinetoEvent(*event.values()) for event in kineto_events]
cpu_events = []
id_map = {}
for e in profiler_events:
event = MockProfilerEvent(**e)
id_map[event.id] = event
cpu_events.append(event)
for event in cpu_events:
parent = None if event.parent is None else id_map[event.parent]
children = [id_map[child] for child in event.children]
event.__post__init__(parent, children)
cpu_events = [event for event in cpu_events if event.parent is None]
profiler = unittest.mock.Mock()
profiler.kineto_results = unittest.mock.Mock()
profiler.kineto_results.events = unittest.mock.Mock(return_value=cuda_events)
profiler.kineto_results.experimental_event_tree = unittest.mock.Mock(
return_value=cpu_events
)
return profiler
def test_utils_compute_self_time(self):
with profile() as prof:
t1, t2 = (
torch.ones(1, requires_grad=True),
torch.ones(1, requires_grad=True),
)
z = torch.add(t1, t2)
y = torch.ones(1)
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
loss.backward()
basic_eval = _utils.BasicEvaluation(prof.profiler)
metrics = basic_eval.metrics
self.assertTrue(len(metrics) > 0)
for event_key, event_metrics in metrics.items():
self.assertEqual(
event_metrics.self_time_ns,
event_key.event.duration_time_ns
- sum(child.duration_time_ns for child in event_key.event.children),
)
def test_utils_intervals_overlap(self):
event = _utils.EventKey(MockProfilerEvent("Event 1", 1, 5, 5))
intervals = [
_utils.Interval(0, 9),
_utils.Interval(1, 2),
_utils.Interval(2, 3),
_utils.Interval(3, 4),
_utils.Interval(4, 5),
_utils.Interval(8, 12),
]
print(event.intervals_overlap(intervals))
self.assertEqual(event.intervals_overlap(intervals), 5)
def test_utils_compute_queue_depth(self):
def format_queue_depth(queue_depth_list, events):
res = ""
for data, event in zip(queue_depth_list, events):
res += f"{data.queue_depth} [{event.name}]\n"
return res
# We have to use Mock because time series data is too flaky to test
profiler = self.generate_mock_profile()
basic_evaluation = _utils.BasicEvaluation(profiler)
self.assertExpectedInline(
format_queue_depth(
basic_evaluation.queue_depth_list, basic_evaluation.cuda_events
),
"""\
1 [cudaLaunchKernel]
2 [cudaLaunchKernel]
3 [cudaLaunchKernel]
4 [cudaLaunchKernel]
5 [cudaLaunchKernel]
4 [GPU]
3 [GPU]
2 [GPU]
1 [GPU]
0 [GPU]
1 [cudaLaunchKernel]
0 [GPU]
""",
)
self.assertExpectedInline(
format_queue_depth(
[basic_evaluation.metrics[k] for k in basic_evaluation.event_keys],
basic_evaluation.events,
),
"""\
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
0 [CPU (Before cudaLaunchKernel)]
1 [CPU (After cudaLaunchKernel)]
2 [CPU (After cudaLaunchKernel)]
3 [CPU (After cudaLaunchKernel)]
4 [CPU (After cudaLaunchKernel)]
5 [CPU (After GPU)]
4 [CPU (After GPU)]
2 [CPU (After GPU)]
1 [CPU (After GPU)]
""",
)
def test_utils_compute_queue_depth_when_no_cuda_events(self):
# For traces with only cpu events, we expect empty queue depth list
x = torch.ones((1024, 1024))
with profile() as prof:
for _ in range(5):
x = x @ x
basic_evaluation = _utils.BasicEvaluation(prof.profiler)
self.assertFalse(basic_evaluation.compute_queue_depth())
def test_utils_compute_idle_time(self):
profiler = self.generate_mock_profile()
basic_evaluation = _utils.BasicEvaluation(profiler)
expected_output = "\n".join(
[
f"{basic_evaluation.metrics[event_key].idle_time_ns} [{event_key.event.name}]"
for event_key in basic_evaluation.event_keys
]
)
self.assertExpectedInline(
expected_output,
"""\
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
100000 [CPU (Before cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After cudaLaunchKernel)]
0 [CPU (After GPU)]
0 [CPU (After GPU)]
0 [CPU (After GPU)]
100000 [CPU (After GPU)]""",
)
@unittest.skipIf(IS_JETSON, "JSON not behaving as expected on Jetson")
def test_utils_get_optimizable_events(self):
basic_evaluation = _utils.BasicEvaluation(self.load_mock_profile())
optimizable_events = basic_evaluation.get_optimizable_events(
2, print_enable=False
)
expected_output = "\n".join(
[f"{event_key.event.name}" for event_key in optimizable_events]
)
self.assertExpectedInline(
expected_output,
"""\
<built-in function _cuda_synchronize>
aten::copy_""",
)
def test_profiler_name_pattern(self):
x = torch.ones((4096, 4096))
with profile() as prof:
for _ in range(5):
x = x @ x
x = x + x
matched_events = NamePattern(prof, "aten::mm").matched_events()
output = "\n".join([f"{event.name}" for event in matched_events])
self.assertExpectedInline(
output,
"""\
aten::mm
aten::mm
aten::mm
aten::mm
aten::mm""",
)
# TODO: Add logic for CUDA version of test
@unittest.skipIf(torch.cuda.is_available(), "Test not working for CUDA")
def test_profiler_pattern_match_helper(self):
x = torch.ones((100, 100))
with profile() as prof:
for _ in range(5):
x = x @ x
x = x + x
event_tree = prof.profiler.kineto_results.experimental_event_tree()
pattern = Pattern(prof)
self.assertEqual([], pattern.siblings_of(event_tree[0])[0])
self.assertEqual(event_tree[1:], pattern.siblings_of(event_tree[0])[1])
child_nodes = event_tree[0].children
self.assertEqual([], pattern.siblings_of(child_nodes[0])[0])
self.assertEqual(child_nodes[1:], pattern.siblings_of(child_nodes[0])[1])
self.assertEqual(
event_tree[0], pattern.root_of(event_tree[0].children[0].children[0])
)
self.assertEqual(None, pattern.next_of(event_tree[-1]))
self.assertEqual(event_tree[1], pattern.next_of(event_tree[0]))
self.assertEqual(event_tree[0], pattern.prev_of(event_tree[1]))
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_extra_cuda_copy_pattern(self):
cases = (
(0, lambda: torch.ones((100, 100), device="cuda")),
(1, lambda: torch.ones((100, 100)).to("cuda")),
(1, lambda: torch.zeros((100, 100)).to("cuda")),
(1, lambda: torch.empty((100, 100)).fill_(5).to("cuda")),
(1, lambda: torch.ones((100, 100)).cuda()),
(1, lambda: torch.zeros((100, 100)).cuda()),
(1, lambda: torch.empty((100, 100)).fill_(5).cuda()),
(1, lambda: torch.rand((100, 100)).cuda()),
(1, lambda: torch.randn((100, 100)).cuda()),
(1, lambda: torch.full((100, 100), 10).cuda()),
(0, lambda: torch.rand((100, 100)).to(dtype=torch.float16)),
(0, lambda: torch.rand((100, 100)).half()),
(0, lambda: torch.rand((100, 100), device="cuda").half()),
)
num_matched = []
for _, fn in cases:
with profile(with_stack=True, record_shapes=True) as prof:
fn()
pattern = ExtraCUDACopyPattern(prof)
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
def test_profiler_for_loop_indexing_pattern(self):
x = torch.ones((100, 100))
def case1():
for i in range(100):
x[i] = i
def case2():
y = 0
for i in range(100):
y += x[i]
def case3():
y = 1
for i in range(100):
y *= x[i]
def case4():
y = x
for _ in range(100):
y = y @ x
def case5():
for i in range(100):
x[i, :] = torch.arange(100) + i
cases = ((1, case1), (1, case2), (1, case3), (0, case4), (1, case5))
num_matched = []
for _, fn in cases:
with profile(with_stack=True) as prof:
fn()
pattern = ForLoopIndexingPattern(prof)
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_fp32_matmul_pattern(self):
x = torch.ones((100, 100), device="cuda")
with profile(with_stack=True) as prof:
x = x @ x
pattern = FP32MatMulPattern(prof)
has_tf32 = 0 if pattern.skip else 1
num_matched = len(pattern.matched_events())
self.assertEqual(num_matched, has_tf32)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_extra_cuda_copy_pattern_benchmark(self):
with profile(with_stack=True, record_shapes=True) as prof:
x = torch.ones((100, 100)).to("cuda")
x = torch.ones((50, 50)).to("cuda")
pattern = ExtraCUDACopyPattern(prof)
shapes_factor_map = pattern.benchmark(pattern.matched_events())
self.assertEqual(len(shapes_factor_map), 2)
def test_profiler_optimizer_single_tensor_pattern(self):
x = torch.ones((100, 100))
cases = (
(1, lambda: torch.optim.Adam(model.parameters())),
(1, lambda: torch.optim.SGD(model.parameters(), lr=0.01)),
(1, lambda: torch.optim.AdamW(model.parameters())),
(0, lambda: torch.optim.Adam(model.parameters(), foreach=True)),
(0, lambda: torch.optim.SGD(model.parameters(), lr=0.01, foreach=True)),
(0, lambda: torch.optim.AdamW(model.parameters(), foreach=True)),
)
num_matched = []
for _, fn in cases:
with profile(with_stack=True) as prof:
model = nn.Sequential(
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 10),
)
optimizer = fn()
optimizer.zero_grad()
y_hat = model(x)
loss = torch.nn.functional.cross_entropy(
y_hat, torch.randint(0, 10, (100,))
)
loss.backward()
optimizer.step()
pattern = OptimizerSingleTensorPattern(prof)
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
def test_profiler_synchronized_dataloader_pattern(self):
dataset = torch.rand((100, 100))
sync_dataloader = torch.utils.data.DataLoader(dataset, batch_size=10)
async_dataloader = torch.utils.data.DataLoader(
dataset, batch_size=10, num_workers=4
)
with profile(with_stack=True) as prof:
next(iter(sync_dataloader))
next(iter(async_dataloader))
pattern = SynchronizedDataLoaderPattern(prof)
num_matched = len(pattern.matched_events())
self.assertEqual(num_matched, 1)
@skipIfTorchDynamo(
"pattern checks for aten::_zero op which might not be there with torch.compile'd graph"
)
def test_profiler_grad_not_set_to_none_pattern(self):
x = torch.ones((100, 100))
model = nn.Sequential(
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 10),
)
optimizer = torch.optim.Adam(model.parameters())
cases = (
(0, lambda: optimizer.zero_grad()),
(0, lambda: model.zero_grad()),
(1, lambda: optimizer.zero_grad(set_to_none=False)),
(1, lambda: model.zero_grad(set_to_none=False)),
)
num_matched = []
for _, fn in cases:
with profile(with_stack=True) as prof:
y_hat = model(x)
loss = torch.nn.functional.cross_entropy(
y_hat, torch.randint(0, 10, (100,))
)
loss.backward()
optimizer.step()
fn()
pattern = GradNotSetToNonePattern(prof)
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
def test_profiler_conv2d_bias_followed_by_batchnorm2d_pattern(self):
x = torch.randn((1, 3, 32, 32))
cases = (
(1, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1), nn.BatchNorm2d(3))),
(0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1, bias=False), nn.BatchNorm2d(3))),
(0, nn.Sequential(nn.Conv2d(3, 3, 3, 1, 1))),
)
num_matched = []
for _, model in cases:
with profile(with_stack=True, record_shapes=True) as prof:
model(x)
pattern = Conv2dBiasFollowedByBatchNorm2dPattern(prof)
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_profiler_matmul_dim_fp16_pattern(self):
cases = (
(1, torch.randn((201, 201), device="cuda", dtype=torch.float16)),
(1, torch.randn((3, 97, 97), device="cuda", dtype=torch.float16)),
(0, torch.randn((200, 200), device="cuda", dtype=torch.float16)),
(0, torch.randn((3, 200, 200), device="cuda", dtype=torch.float16)),
)
num_matched = []
for _, x in cases:
with profile(with_stack=True, record_shapes=True) as prof:
x @ x
pattern = MatMulDimInFP16Pattern(prof)
num_matched.append(len(pattern.matched_events()))
self.assertEqual(num_matched, [i for i, _ in cases])
@skipIfTorchDynamo("profiler gets ignored if dynamo activated")
def test_profiler_pattern_matcher_json_report(self):
x = torch.ones((100, 100))
model = nn.Sequential(
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 10),
)
optimizer = torch.optim.Adam(model.parameters())
with profile(with_stack=True, record_shapes=True) as prof:
y_hat = model(x)
loss = torch.nn.functional.cross_entropy(
y_hat, torch.randint(0, 10, (100,))
)
loss.backward()
optimizer.step()
optimizer.zero_grad()
with tempfile.TemporaryDirectory() as tmpdir:
report_all_anti_patterns(prof, json_report_dir=tmpdir, print_enable=False)
with open(os.path.join(tmpdir, "torchtidy_report.json")) as f:
report = json.load(f)
# It is platform dependent whether the path will include "profiler/"
keys = [k for k in report if k.endswith("test_profiler.py")]
self.assertEqual(len(keys), 1, f"{keys}")
entry = report[keys[0]]
self.assertTrue(len(entry) > 0)
expected_fields = sorted(["line_number", "name", "url", "message"])
for event in entry:
actual_fields = sorted(event.keys())
self.assertEqual(expected_fields, actual_fields)
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "x86 linux only cpp unwinding")
def test_fuzz_symbolize(self):
# generate some random addresses in the text section and make sure the
# symbolizers do not throw exceptions/crash
def get_text_sections():
text_sections = []
seen = set()
for filename in os.listdir("/proc/self/map_files"):
library = os.readlink("/proc/self/map_files/" + filename)
if ".so" not in library or library in seen:
continue
seen.add(library)
with open(os.path.join("/proc/self/map_files", library), "rb") as f:
mm = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
def unpack(fmt, offset):
return struct.unpack(
fmt, mm[offset : offset + struct.calcsize(fmt)]
)
if mm[:4] != b"\x7fELF":
continue
(section_headers_start,) = unpack("Q", 40)
(section_header_size,) = unpack("H", 58)
(num_section_headers,) = unpack("H", 60)
(shstrndx,) = unpack("H", 62)
(shstrtab_offset,) = unpack(
"Q", section_headers_start + shstrndx * section_header_size + 24
)
for i in range(num_section_headers):
(section_name_offset,) = unpack(
"I", section_headers_start + i * section_header_size
)
name_start = shstrtab_offset + section_name_offset
section_name = mm[name_start : name_start + 6]
if section_name != b".text\0":
continue
(section_offset,) = unpack(
"Q", section_headers_start + i * section_header_size + 24
)
(section_size,) = unpack(
"Q", section_headers_start + i * section_header_size + 32
)
start = int(filename.split("-")[0], 16) + section_offset
text_sections.append((start, section_size))
break
mm.close()
return text_sections
r = random.Random()
r.seed(1)
text_sections = get_text_sections()
addrs = []
for _ in range(200):
s = r.randrange(0, len(text_sections))
start, size = text_sections[s]
addr = r.randrange(start, start + size)
addrs.append(addr)
fast = torch._C._profiler.symbolize_addresses(addrs, "fast")
dladdr = torch._C._profiler.symbolize_addresses(addrs, "dladdr")
addr2line = torch._C._profiler.symbolize_addresses(addrs, "addr2line")
self.assertEqual(len(fast), len(addrs))
self.assertEqual(len(addr2line), len(fast))
def test_profiler_overload_names(self):
from torch.library import _scoped_library, fallthrough_kernel
def validate_json(prof):
print()
with TemporaryFileName(mode="w+") as fname:
prof.export_chrome_trace(fname)
with open(fname) as f:
events = json.load(f)["traceEvents"]
self.assertTrue(
any("aten::add.Tensor" in e["name"] for e in events)
)
self.assertTrue(any("aten::add.out" in e["name"] for e in events))
with _scoped_library("aten", "IMPL") as my_lib:
my_lib.impl("add.Tensor", fallthrough_kernel, "CPU")
experimental_config = torch._C._profiler._ExperimentalConfig(
capture_overload_names=True
)
with profile(
experimental_config=experimental_config,
activities=[ProfilerActivity.CPU],
) as prof:
torch.add(1, 5)
# The following execution trace is expected
#
# Dispatch trace:
# [call] op=[aten::add.Tensor], key=[AutogradCPU]
# [redispatch] op=[aten::add.Tensor], key=[Undefined]
# [call] op=[aten::empty.memory_format], key=[BackendSelect]
# [redispatch] op=[aten::empty.memory_format], key=[CPU]
# [call] op=[aten::add.out], key=[CPU]
#
# prof.table()
# --------------- --------------- ------------ ------------ ------------ ------------ ------------ ------------
# Name Overload Name Self CPU % Self CPU CPU total % CPU total CPU time avg # of Calls
# --------------- --------------- ------------ ------------ ------------ ------------ ------------ ------------
# aten::add Tensor 71.97% 130.887us 100.00% 181.873us 181.873us 1
# aten::empty memory_format 8.52% 15.489us 8.52% 15.489us 15.489us 1
# aten::add out 19.52% 35.497us 19.52% 35.497us 35.497us 1
# --------------- --------------- ------------ ------------ ------------ ------------ ------------ ------------
# aten::add.out and aten::empty.memory_format are children of aten::add.Tensor
aten_add_parent: list[FunctionEvent] = [
event for event in prof.events() if len(event.cpu_children) == 2
]
assert len(aten_add_parent) == 1
aten_add_parent = aten_add_parent[0]
assert aten_add_parent.overload_name == "Tensor"
aten_add_out_event = [
c for c in aten_add_parent.cpu_children if c.overload_name == "out"
]
assert len(aten_add_out_event) == 1
# Without group_by_overload_name, the overload name is ignored in the key averages
key_averages = prof.key_averages()
assert len(key_averages) == 2
assert "Overload Name" not in key_averages.table()
key_averages = prof.key_averages(group_by_overload_name=True)
assert len(key_averages) == 3
assert "Overload Name" in key_averages.table()
validate_json(prof)
def test_expose_kineto_event_metadata(self):
def check_metadata(prof, op_name, metadata_key):
with TemporaryFileName(mode="w+") as fname:
prof.export_chrome_trace(fname)
with open(fname) as f:
events = json.load(f)["traceEvents"]
found_op = False
for e in events:
if "name" in e and "args" in e and e["name"] == op_name:
assert metadata_key in e["args"], (
f"Metadata for '{op_name}' in Chrome trace did not contain '{metadata_key}'."
)
found_op = True
assert found_op, f"Could not find op '{op_name}' in Chrome trace."
found_op = False
for event in prof.events():
if event.name == op_name:
assert metadata_key in event.metadata_json, (
f"Metadata for '{op_name}' in FunctionEvent did not contain '{metadata_key}'."
)
found_op = True
assert found_op, f"Could not find op '{op_name}' in prof.events()."
experimental_config = torch._C._profiler._ExperimentalConfig(
expose_kineto_event_metadata=True
)
with profile(
experimental_config=experimental_config,
activities=[ProfilerActivity.CPU],
) as prof:
torch.add(1, 5)
check_metadata(prof, op_name="aten::add", metadata_key="Ev Idx")
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
def test_profiler_debug_autotuner(self):
"""
This test makes sure that profiling events will be present when the kernel is run using the DebugAutotuner.
"""
if not is_big_gpu():
raise unittest.SkipTest("requires large gpu to max-autotune")
in1 = torch.randn((256, 512), device="cuda", dtype=torch.float16)
in2 = torch.randn((512, 768), device="cuda", dtype=torch.float16)
def mm():
return torch.mm(in1, in2)
pb_mm = torch.compile(
mm,
options={
"benchmark_kernel": True,
"max_autotune": True,
"max_autotune_gemm_backends": "TRITON",
"profile_bandwidth": True,
},
)
comp_mm = torch.compile(
mm,
options={
"benchmark_kernel": True,
"max_autotune": True,
"max_autotune_gemm_backends": "TRITON",
},
)
with profile() as prof1:
pb_mm()
with profile() as prof2:
comp_mm()
def names(prof):
return {
ev.name
for ev in prof.events()
if "mm" in ev.name or "triton" in ev.name
}
n1 = names(prof1)
n2 = names(prof2)
self.assertEqual(n1, n2)
if __name__ == "__main__":
run_tests()
| TestExperimentalUtils |
python | mlflow__mlflow | dev/clint/src/clint/rules/pytest_mark_repeat.py | {
"start": 84,
"end": 709
} | class ____(Rule):
def _message(self) -> str:
return (
"@pytest.mark.repeat decorator should not be committed. "
"This decorator is meant for local testing only to check for flaky tests."
)
@staticmethod
def check(decorator_list: list[ast.expr], resolver: Resolver) -> ast.expr | None:
"""
Returns the decorator node if it is a `@pytest.mark.repeat` decorator.
"""
for deco in decorator_list:
if (res := resolver.resolve(deco)) and res == ["pytest", "mark", "repeat"]:
return deco
return None
| PytestMarkRepeat |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 33070,
"end": 33267
} | class ____(Rank):
"""Multiplication of multiple ranks"""
ranks: List[Rank]
def to_dict(self) -> Dict[str, Any]:
return {"$mul": [r.to_dict() for r in self.ranks]}
@dataclass
| Mul |
python | getsentry__sentry | src/sentry/analytics/events/monitor_mark_failed.py | {
"start": 88,
"end": 318
} | class ____(analytics.Event):
organization_id: int
monitor_id: str # this is stringified in the caller
project_id: int
environment_id: int
analytics.register(MonitorEnvironmentMarkFailed)
| MonitorEnvironmentMarkFailed |
python | facebook__pyre-check | client/language_server/tests/daemon_connection_test.py | {
"start": 512,
"end": 841
} | class ____(connections.AsyncBytesReader):
def __init__(self, exception: Exception) -> None:
self.exception = exception
async def read_until(self, separator: bytes = b"\n") -> bytes:
raise self.exception
async def read_exactly(self, count: int) -> bytes:
raise self.exception
| RaisingBytesReader |
python | ray-project__ray | python/ray/serve/_private/metrics_utils.py | {
"start": 651,
"end": 751
} | class ____:
task_func: Union[Callable, Callable[[], Awaitable]]
interval_s: float
| _MetricsTask |
python | scipy__scipy | scipy/special/_multiufuncs.py | {
"start": 541,
"end": 19402
} | class ____:
def __init__(self, ufunc_or_ufuncs, name=None, doc=None, *,
force_complex_output=False, **default_kwargs):
if not isinstance(ufunc_or_ufuncs, np.ufunc):
if isinstance(ufunc_or_ufuncs, collections.abc.Mapping):
ufuncs_iter = ufunc_or_ufuncs.values()
elif isinstance(ufunc_or_ufuncs, collections.abc.Iterable):
ufuncs_iter = ufunc_or_ufuncs
else:
raise ValueError("ufunc_or_ufuncs should be a ufunc or a"
" ufunc collection")
# Perform input validation to ensure all ufuncs in ufuncs are
# actually ufuncs and all take the same input types.
seen_input_types = set()
for ufunc in ufuncs_iter:
if not isinstance(ufunc, np.ufunc):
raise ValueError("All ufuncs must have type `numpy.ufunc`."
f" Received {ufunc_or_ufuncs}")
seen_input_types.add(frozenset(x.split("->")[0] for x in ufunc.types))
if len(seen_input_types) > 1:
raise ValueError("All ufuncs must take the same input types.")
self.__name__ = name
self._ufunc_or_ufuncs = ufunc_or_ufuncs
self.__doc = doc
self.__force_complex_output = force_complex_output
self._default_kwargs = default_kwargs
self._resolve_out_shapes = None
self._finalize_out = None
self._key = None
self._ufunc_default_args = lambda *args, **kwargs: ()
self._ufunc_default_kwargs = lambda *args, **kwargs: {}
@property
def __doc__(self):
return self.__doc
def _override_key(self, func):
"""Set `key` method by decorating a function.
"""
self._key = func
def _override_ufunc_default_args(self, func):
self._ufunc_default_args = func
def _override_ufunc_default_kwargs(self, func):
self._ufunc_default_kwargs = func
def _override_resolve_out_shapes(self, func):
"""Set `resolve_out_shapes` method by decorating a function."""
if func.__doc__ is None:
func.__doc__ = \
"""Resolve to output shapes based on relevant inputs."""
func.__name__ = "resolve_out_shapes"
self._resolve_out_shapes = func
def _override_finalize_out(self, func):
self._finalize_out = func
def _resolve_ufunc(self, **kwargs):
"""Resolve to a ufunc based on keyword arguments."""
if isinstance(self._ufunc_or_ufuncs, np.ufunc):
return self._ufunc_or_ufuncs
ufunc_key = self._key(**kwargs)
return self._ufunc_or_ufuncs[ufunc_key]
def __call__(self, *args, **kwargs):
kwargs = self._default_kwargs | kwargs
args += self._ufunc_default_args(**kwargs)
ufunc = self._resolve_ufunc(**kwargs)
# array arguments to be passed to the ufunc
ufunc_args = [np.asarray(arg) for arg in args[-ufunc.nin:]]
ufunc_kwargs = self._ufunc_default_kwargs(**kwargs)
if (self._resolve_out_shapes is not None):
ufunc_arg_shapes = tuple(np.shape(ufunc_arg) for ufunc_arg in ufunc_args)
ufunc_out_shapes = self._resolve_out_shapes(*args[:-ufunc.nin],
*ufunc_arg_shapes, ufunc.nout,
**kwargs)
ufunc_arg_dtypes = tuple(ufunc_arg.dtype if hasattr(ufunc_arg, 'dtype')
else np.dtype(type(ufunc_arg))
for ufunc_arg in ufunc_args)
if hasattr(ufunc, 'resolve_dtypes'):
ufunc_dtypes = ufunc_arg_dtypes + ufunc.nout * (None,)
ufunc_dtypes = ufunc.resolve_dtypes(ufunc_dtypes)
ufunc_out_dtypes = ufunc_dtypes[-ufunc.nout:]
else:
ufunc_out_dtype = np.result_type(*ufunc_arg_dtypes)
if (not np.issubdtype(ufunc_out_dtype, np.inexact)):
ufunc_out_dtype = np.float64
ufunc_out_dtypes = ufunc.nout * (ufunc_out_dtype,)
if self.__force_complex_output:
ufunc_out_dtypes = tuple(np.result_type(1j, ufunc_out_dtype)
for ufunc_out_dtype in ufunc_out_dtypes)
out = tuple(np.empty(ufunc_out_shape, dtype=ufunc_out_dtype)
for ufunc_out_shape, ufunc_out_dtype
in zip(ufunc_out_shapes, ufunc_out_dtypes))
ufunc_kwargs['out'] = out
out = ufunc(*ufunc_args, **ufunc_kwargs)
if (self._finalize_out is not None):
out = self._finalize_out(out)
return out
sph_legendre_p = MultiUFunc(
sph_legendre_p,
"sph_legendre_p",
r"""sph_legendre_p(n, m, theta, *, diff_n=0)
Spherical Legendre polynomial of the first kind.
Parameters
----------
n : ArrayLike[int]
Degree of the spherical Legendre polynomial. Must have ``n >= 0``.
m : ArrayLike[int]
Order of the spherical Legendre polynomial.
theta : ArrayLike[float]
Input value.
diff_n : Optional[int]
A non-negative integer. Compute and return all derivatives up
to order ``diff_n``. Default is 0.
Returns
-------
p : ndarray or tuple[ndarray]
Spherical Legendre polynomial with ``diff_n`` derivatives.
Notes
-----
The spherical counterpart of an (unnormalized) associated Legendre polynomial has
the additional factor
.. math::
\sqrt{\frac{(2 n + 1) (n - m)!}{4 \pi (n + m)!}}
It is the same as the spherical harmonic :math:`Y_{n}^{m}(\theta, \phi)`
with :math:`\phi = 0`.
""", diff_n=0
)
@sph_legendre_p._override_key
def _(diff_n):
diff_n = _nonneg_int_or_fail(diff_n, "diff_n", strict=False)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return diff_n
@sph_legendre_p._override_finalize_out
def _(out):
return np.moveaxis(out, -1, 0)
sph_legendre_p_all = MultiUFunc(
sph_legendre_p_all,
"sph_legendre_p_all",
"""sph_legendre_p_all(n, m, theta, *, diff_n=0)
All spherical Legendre polynomials of the first kind up to the
specified degree ``n``, order ``m``, and all derivatives up
to order ``diff_n``.
Output shape is ``(diff_n + 1, n + 1, 2 * m + 1, ...)``. The entry at
``(i, j, k)`` corresponds to the ``i``-th derivative, degree ``j``, and
order ``k`` for all ``0 <= i <= diff_n``, ``0 <= j <= n``, and
``-m <= k <= m``.
See Also
--------
sph_legendre_p
""", diff_n=0
)
@sph_legendre_p_all._override_key
def _(diff_n):
diff_n = _nonneg_int_or_fail(diff_n, "diff_n", strict=False)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return diff_n
@sph_legendre_p_all._override_ufunc_default_kwargs
def _(diff_n):
return {'axes': [()] + [(0, 1, -1)]}
@sph_legendre_p_all._override_resolve_out_shapes
def _(n, m, theta_shape, nout, diff_n):
if not isinstance(n, numbers.Integral) or (n < 0):
raise ValueError("n must be a non-negative integer.")
return ((n + 1, 2 * abs(m) + 1) + theta_shape + (diff_n + 1,),)
@sph_legendre_p_all._override_finalize_out
def _(out):
return np.moveaxis(out, -1, 0)
assoc_legendre_p = MultiUFunc(
assoc_legendre_p,
"assoc_legendre_p",
r"""assoc_legendre_p(n, m, z, *, branch_cut=2, norm=False, diff_n=0)
Associated Legendre polynomial of the first kind.
Parameters
----------
n : ArrayLike[int]
Degree of the associated Legendre polynomial. Must have ``n >= 0``.
m : ArrayLike[int]
order of the associated Legendre polynomial.
z : ArrayLike[float | complex]
Input value.
branch_cut : Optional[ArrayLike[int]]
Selects branch cut. Must be 2 (default) or 3.
2: cut on the real axis ``|z| > 1``
3: cut on the real axis ``-1 < z < 1``
norm : Optional[bool]
If ``True``, compute the normalized associated Legendre polynomial.
Default is ``False``.
diff_n : Optional[int]
A non-negative integer. Compute and return all derivatives up
to order ``diff_n``. Default is 0.
Returns
-------
p : ndarray or tuple[ndarray]
Associated Legendre polynomial with ``diff_n`` derivatives.
Notes
-----
The normalized counterpart of an (unnormalized) associated Legendre
polynomial has the additional factor
.. math::
\sqrt{\frac{(2 n + 1) (n - m)!}{2 (n + m)!}}
""", branch_cut=2, norm=False, diff_n=0
)
@assoc_legendre_p._override_key
def _(branch_cut, norm, diff_n):
diff_n = _nonneg_int_or_fail(diff_n, "diff_n", strict=False)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return norm, diff_n
@assoc_legendre_p._override_ufunc_default_args
def _(branch_cut, norm, diff_n):
return branch_cut,
@assoc_legendre_p._override_finalize_out
def _(out):
return np.moveaxis(out, -1, 0)
assoc_legendre_p_all = MultiUFunc(
assoc_legendre_p_all,
"assoc_legendre_p_all",
"""assoc_legendre_p_all(n, m, z, *, branch_cut=2, norm=False, diff_n=0)
All associated Legendre polynomials of the first kind up to the
specified degree ``n``, order ``m``, and all derivatives up
to order ``diff_n``.
Output shape is ``(diff_n + 1, n + 1, 2 * m + 1, ...)``. The entry at
``(i, j, k)`` corresponds to the ``i``-th derivative, degree ``j``, and
order ``k`` for all ``0 <= i <= diff_n``, ``0 <= j <= n``, and
``-m <= k <= m``.
See Also
--------
assoc_legendre_p
""", branch_cut=2, norm=False, diff_n=0
)
@assoc_legendre_p_all._override_key
def _(branch_cut, norm, diff_n):
if not ((isinstance(diff_n, numbers.Integral))
and diff_n >= 0):
raise ValueError(
f"diff_n must be a non-negative integer, received: {diff_n}."
)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return norm, diff_n
@assoc_legendre_p_all._override_ufunc_default_args
def _(branch_cut, norm, diff_n):
return branch_cut,
@assoc_legendre_p_all._override_ufunc_default_kwargs
def _(branch_cut, norm, diff_n):
return {'axes': [(), ()] + [(0, 1, -1)]}
@assoc_legendre_p_all._override_resolve_out_shapes
def _(n, m, z_shape, branch_cut_shape, nout, **kwargs):
diff_n = kwargs['diff_n']
if not isinstance(n, numbers.Integral) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isinstance(m, numbers.Integral) or (m < 0):
raise ValueError("m must be a non-negative integer.")
return ((n + 1, 2 * abs(m) + 1) +
np.broadcast_shapes(z_shape, branch_cut_shape) + (diff_n + 1,),)
@assoc_legendre_p_all._override_finalize_out
def _(out):
return np.moveaxis(out, -1, 0)
legendre_p = MultiUFunc(
legendre_p,
"legendre_p",
"""legendre_p(n, z, *, diff_n=0)
Legendre polynomial of the first kind.
Parameters
----------
n : ArrayLike[int]
Degree of the Legendre polynomial. Must have ``n >= 0``.
z : ArrayLike[float]
Input value.
diff_n : Optional[int]
A non-negative integer. Compute and return all derivatives up
to order ``diff_n``. Default is 0.
Returns
-------
p : ndarray or tuple[ndarray]
Legendre polynomial with ``diff_n`` derivatives.
See Also
--------
legendre
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
""", diff_n=0
)
@legendre_p._override_key
def _(diff_n):
if (not isinstance(diff_n, numbers.Integral)) or (diff_n < 0):
raise ValueError(
f"diff_n must be a non-negative integer, received: {diff_n}."
)
if not 0 <= diff_n <= 2:
raise NotImplementedError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return diff_n
@legendre_p._override_finalize_out
def _(out):
return np.moveaxis(out, -1, 0)
legendre_p_all = MultiUFunc(
legendre_p_all,
"legendre_p_all",
"""legendre_p_all(n, z, *, diff_n=0)
All Legendre polynomials of the first kind up to the specified degree
``n`` and all derivatives up to order ``diff_n``.
Output shape is ``(diff_n + 1, n + 1, ...)``. The entry at ``(i, j)``
corresponds to the ``i``-th derivative and degree ``j`` for all
``0 <= i <= diff_n`` and ``0 <= j <= n``.
See Also
--------
legendre_p
""", diff_n=0
)
@legendre_p_all._override_key
def _(diff_n):
diff_n = _nonneg_int_or_fail(diff_n, "diff_n", strict=False)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return diff_n
@legendre_p_all._override_ufunc_default_kwargs
def _(diff_n):
return {'axes': [(), (0, -1)]}
@legendre_p_all._override_resolve_out_shapes
def _(n, z_shape, nout, diff_n):
n = _nonneg_int_or_fail(n, 'n', strict=False)
return nout * ((n + 1,) + z_shape + (diff_n + 1,),)
@legendre_p_all._override_finalize_out
def _(out):
return np.moveaxis(out, -1, 0)
sph_harm_y = MultiUFunc(
sph_harm_y,
"sph_harm_y",
r"""sph_harm_y(n, m, theta, phi, *, diff_n=0)
Spherical harmonics. They are defined as
.. math::
Y_n^m(\theta,\phi) = \sqrt{\frac{2 n + 1}{4 \pi} \frac{(n - m)!}{(n + m)!}}
P_n^m(\cos(\theta)) e^{i m \phi}
where :math:`P_n^m` are the (unnormalized) associated Legendre polynomials.
Parameters
----------
n : ArrayLike[int]
Degree of the harmonic. Must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
m : ArrayLike[int]
Order of the harmonic.
theta : ArrayLike[float]
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
phi : ArrayLike[float]
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
diff_n : Optional[int]
A non-negative integer. Compute and return all derivatives up
to order ``diff_n``. Default is 0.
Returns
-------
y : ndarray[complex] or tuple[ndarray[complex]]
Spherical harmonics with ``diff_n`` derivatives.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
polar angle and ``phi`` is the azimuthal angle. It is common to
see the opposite convention, that is, ``theta`` as the azimuthal angle
and ``phi`` as the polar angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `sph_legendre_p`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\phi} \sin(\theta) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\theta) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\phi} \sin(\theta).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""", force_complex_output=True, diff_n=0
)
@sph_harm_y._override_key
def _(diff_n):
diff_n = _nonneg_int_or_fail(diff_n, "diff_n", strict=False)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 0, 1, and 2,"
f" received: {diff_n}."
)
return diff_n
@sph_harm_y._override_finalize_out
def _(out):
if (out.shape[-1] == 1):
return out[..., 0, 0]
if (out.shape[-1] == 2):
return out[..., 0, 0], out[..., [1, 0], [0, 1]]
if (out.shape[-1] == 3):
return (out[..., 0, 0], out[..., [1, 0], [0, 1]],
out[..., [[2, 1], [1, 0]], [[0, 1], [1, 2]]])
sph_harm_y_all = MultiUFunc(
sph_harm_y_all,
"sph_harm_y_all",
"""sph_harm_y_all(n, m, theta, phi, *, diff_n=0)
All spherical harmonics up to the specified degree ``n``, order ``m``,
and all derivatives up to order ``diff_n``.
Returns a tuple of length ``diff_n + 1`` (if ``diff_n > 0``). The first
entry corresponds to the spherical harmonics, the second entry
(if ``diff_n >= 1``) to the gradient, and the third entry
(if ``diff_n >= 2``) to the Hessian matrix. Each entry is an array of
shape ``(n + 1, 2 * m + 1, ...)``, where the entry at ``(i, j)``
corresponds to degree ``i`` and order ``j`` for all ``0 <= i <= n``
and ``-m <= j <= m``.
See Also
--------
sph_harm_y
""", force_complex_output=True, diff_n=0
)
@sph_harm_y_all._override_key
def _(diff_n):
diff_n = _nonneg_int_or_fail(diff_n, "diff_n", strict=False)
if not 0 <= diff_n <= 2:
raise ValueError(
"diff_n is currently only implemented for orders 2,"
f" received: {diff_n}."
)
return diff_n
@sph_harm_y_all._override_ufunc_default_kwargs
def _(diff_n):
return {'axes': [(), ()] + [(0, 1, -2, -1)]}
@sph_harm_y_all._override_resolve_out_shapes
def _(n, m, theta_shape, phi_shape, nout, **kwargs):
diff_n = kwargs['diff_n']
if not isinstance(n, numbers.Integral) or (n < 0):
raise ValueError("n must be a non-negative integer.")
return ((n + 1, 2 * abs(m) + 1) + np.broadcast_shapes(theta_shape, phi_shape) +
(diff_n + 1, diff_n + 1),)
@sph_harm_y_all._override_finalize_out
def _(out):
if (out.shape[-1] == 1):
return out[..., 0, 0]
if (out.shape[-1] == 2):
return out[..., 0, 0], out[..., [1, 0], [0, 1]]
if (out.shape[-1] == 3):
return (out[..., 0, 0], out[..., [1, 0], [0, 1]],
out[..., [[2, 1], [1, 0]], [[0, 1], [1, 2]]])
| MultiUFunc |
python | cherrypy__cherrypy | cherrypy/_cpwsgi_server.py | {
"start": 194,
"end": 815
} | class ____(cheroot.server.HTTPRequest):
"""Wrapper for cheroot.server.HTTPRequest.
This is a layer, which preserves URI parsing mode like it which was
before Cheroot v5.8.0.
"""
def __init__(self, server, conn):
"""Initialize HTTP request container instance.
Args:
server (cheroot.server.HTTPServer):
web server object receiving this request
conn (cheroot.server.HTTPConnection):
HTTP connection object for this request
"""
super(CPWSGIHTTPRequest, self).__init__(server, conn, proxy_mode=True)
| CPWSGIHTTPRequest |
python | Textualize__textual | docs/examples/guide/screens/modes01.py | {
"start": 409,
"end": 541
} | class ____(Screen):
def compose(self) -> ComposeResult:
yield Placeholder("Help Screen")
yield Footer()
| HelpScreen |
python | getsentry__sentry | src/sudo/utils.py | {
"start": 405,
"end": 2376
} | class ____(HttpRequest):
_sudo: bool
_sudo_token: str
_sudo_max_age: int
def _allow_sudo_attribute_stuffing(request: HttpRequest) -> _SudoRequest:
# cast to our fake type which allows typesafe attribute stuffing
return cast(_SudoRequest, request)
def grant_sudo_privileges(request: HttpRequest, max_age: int = COOKIE_AGE) -> str | None:
"""
Assigns a random token to the user's session
that allows them to have elevated permissions
"""
request = _allow_sudo_attribute_stuffing(request)
user = getattr(request, "user", None)
# If there's not a user on the request, just noop
if user is None:
return None
if not user.is_authenticated:
raise ValueError("User needs to be logged in to be elevated to sudo")
# Token doesn't need to be unique,
# just needs to be unpredictable and match the cookie and the session
token = get_random_string(12)
request.session[COOKIE_NAME] = token
request._sudo = True
request._sudo_token = token
request._sudo_max_age = max_age
return token
def revoke_sudo_privileges(request: HttpRequest) -> None:
"""
Revoke sudo privileges from a request explicitly
"""
request = _allow_sudo_attribute_stuffing(request)
request._sudo = False
if COOKIE_NAME in request.session:
del request.session[COOKIE_NAME]
def has_sudo_privileges(request: HttpRequest) -> bool:
"""
Check if a request is allowed to perform sudo actions
"""
request = _allow_sudo_attribute_stuffing(request)
if getattr(request, "_sudo", None) is None:
try:
request._sudo = request.user.is_authenticated and constant_time_compare(
request.get_signed_cookie(COOKIE_NAME, salt=COOKIE_SALT, max_age=COOKIE_AGE) or "",
request.session[COOKIE_NAME],
)
except (KeyError, BadSignature):
request._sudo = False
return request._sudo
| _SudoRequest |
python | huggingface__transformers | src/transformers/models/llama4/processing_llama4.py | {
"start": 939,
"end": 6248
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding_side": "left",
},
}
chat_template = "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %} \n {%- if messages[0]['content'] is string %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- else %}\n {#- FIXME: The processor requires an array, always. #}\n {%- set system_message = messages[0]['content'][0]['text']|trim %}\n {%- endif %}\n {%- set messages = messages[1:] %}\n {%- set user_supplied_system_message = true %}\n{%- else %}\n {%- set system_message = \"\" %}\n {%- set user_supplied_system_message = false %}\n{%- endif %}\n\n{#- System message if the user supplied one #}\n{%- if user_supplied_system_message %}\n {{- \"<|header_start|>system<|header_end|>\n\n\" }}\n {%- if tools is not none %}\n {{- \"Environment: ipython\n\" }}\n {%- endif %}\n {%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\n\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\n\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- \"<|eot|>\" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|header_start|>user<|header_end|>\n\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\n\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\n\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\n\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|header_start|>' + message['role'] + '<|header_end|>\n\n' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- elif 'tool_calls' in message and message.tool_calls|length > 0 %}\n {{- '<|header_start|>assistant<|header_end|>\n\n' -}}\n {{- '<|python_start|>' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|python_end|>' }}\n {%- for tool_call in message.tool_calls %}\n {{- '{\"name\": \"' + tool_call.function.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.function.arguments | tojson }}\n {{- \"}\" }}\n {%- endfor %}\n {{- \"<|eot|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|header_start|>ipython<|header_end|>\n\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|header_start|>assistant<|header_end|>\n\n' }}\n{%- endif %}\n"
| Llama4ProcessorKwargs |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0021_add-webhook-deprecation-feature.py | {
"start": 143,
"end": 313
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0020_add-api-project-proxy"),
]
operations = []
| Migration |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/pipes/clients/ecs.py | {
"start": 1229,
"end": 15482
} | class ____(PipesClient, TreatAsResourceParam):
"""A pipes client for running AWS ECS tasks.
Args:
client (Any): The boto ECS client used to launch the ECS task
context_injector (Optional[PipesContextInjector]): A context injector to use to inject
context into the ECS task. Defaults to :py:class:`PipesEnvContextInjector`.
message_reader (Optional[PipesMessageReader]): A message reader to use to read messages
from the ECS task. Defaults to :py:class:`PipesCloudWatchMessageReader`.
forward_termination (bool): Whether to cancel the ECS task when the Dagster process receives a termination signal.
"""
def __init__(
self,
client: Optional["ECSClient"] = None,
context_injector: Optional[PipesContextInjector] = None,
message_reader: Optional[PipesMessageReader] = None,
forward_termination: bool = True,
):
self._client: ECSClient = client or boto3.client("ecs")
self._context_injector = context_injector or PipesEnvContextInjector()
self._message_reader = message_reader or PipesCloudWatchMessageReader()
self.forward_termination = check.bool_param(forward_termination, "forward_termination")
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@public
def run( # pyright: ignore[reportIncompatibleMethodOverride]
self,
*,
context: Union[OpExecutionContext, AssetExecutionContext],
run_task_params: "RunTaskRequestTypeDef",
extras: Optional[dict[str, Any]] = None,
pipes_container_name: Optional[str] = None,
waiter_config: Optional[WaiterConfig] = None,
) -> PipesClientCompletedInvocation:
"""Run ECS tasks, enriched with the pipes protocol.
Args:
context (Union[OpExecutionContext, AssetExecutionContext]): The context of the currently executing Dagster op or asset.
run_task_params (dict): Parameters for the ``run_task`` boto3 ECS client call.
Must contain ``taskDefinition`` key.
See `Boto3 API Documentation <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs/client/run_task.html#run-task>`_
extras (Optional[Dict[str, Any]]): Additional information to pass to the Pipes session in the external process.
pipes_container_name (Optional[str]): If running more than one container in the task,
and using :py:class:`PipesCloudWatchMessageReader`, specify the container name which will be running Pipes.
waiter_config (Optional[WaiterConfig]): Optional waiter configuration to use. Defaults to 70 days (Delay: 6, MaxAttempts: 1000000).
Returns:
PipesClientCompletedInvocation: Wrapper containing results reported by the external
process.
"""
waiter_config = waiter_config or WaiterConfig(Delay=6, MaxAttempts=1000000)
with open_pipes_session(
context=context,
message_reader=self._message_reader,
context_injector=self._context_injector,
extras=extras,
) as session:
# we can't be running more than 1 replica of the task
# because this guarantees multiple Pipes sessions running at the same time
# which we don't support yet
if run_task_params.get("count", 1) > 1:
raise DagsterInvariantViolationError(
"Running more than one ECS task is not supported."
)
task_definition = run_task_params["taskDefinition"]
cluster = run_task_params.get("cluster")
overrides = cast("dict", run_task_params.get("overrides") or {})
overrides["containerOverrides"] = overrides.get("containerOverrides", [])
# get all containers from task definition
task_definition_response = self._client.describe_task_definition(
taskDefinition=task_definition
)
log_configurations = {
container["name"]: container.get("logConfiguration") # pyright: ignore (reportTypedDictNotRequiredAccess)
for container in task_definition_response["taskDefinition"]["containerDefinitions"] # pyright: ignore (reportTypedDictNotRequiredAccess)
}
all_container_names = {
container["name"] # pyright: ignore (reportTypedDictNotRequiredAccess)
for container in task_definition_response["taskDefinition"]["containerDefinitions"] # pyright: ignore (reportTypedDictNotRequiredAccess)
}
container_names_with_overrides = {
container_override["name"] for container_override in overrides["containerOverrides"]
}
pipes_args = session.get_bootstrap_env_vars()
# set env variables for every container in the taskDefinition
# respecting current overrides provided by the user
environment_overrides = [
{
"name": k,
"value": v,
}
for k, v in pipes_args.items()
]
# set environment variables for existing overrides
for container_override in overrides["containerOverrides"]:
container_override["environment"] = container_override.get("environment", [])
container_override["environment"].extend(environment_overrides)
# set environment variables for containers that are not in the overrides
for container_name in all_container_names - container_names_with_overrides:
overrides["containerOverrides"].append(
{
"name": container_name,
"environment": environment_overrides,
}
)
run_task_params["overrides"] = ( # pyright: ignore (reportGeneralTypeIssues)
overrides # assign in case overrides was created here as an empty dict
)
# inject Dagster tags
tags = list(run_task_params.get("tags", []))
for key, value in session.default_remote_invocation_info.items():
tags.append({"key": key, "value": value})
run_task_params["tags"] = tags
response = self._client.run_task(**run_task_params)
if len(response["tasks"]) > 1:
# this error should never happen, as we're running a single task
raise DagsterInvariantViolationError(
f"Expected to get a single task from response, got multiple: {response['tasks']}"
)
task = response["tasks"][0]
task_arn = task["taskArn"] # pyright: ignore (reportTypedDictNotRequiredAccess)
task_id = task_arn.split("/")[-1]
containers = task["containers"] # pyright: ignore (reportTypedDictNotRequiredAccess)
def get_cloudwatch_params(container_name: str) -> Optional[dict[str, str]]:
"""This will either return the log group and stream for the container, or None in case of a bad log configuration."""
if log_config := log_configurations.get(container_name):
if log_config["logDriver"] == "awslogs":
log_group = log_config["options"]["awslogs-group"] # pyright: ignore (reportTypedDictNotRequiredAccess)
# stream name is combined from: prefix, container name, task id
log_stream = f"{log_config['options']['awslogs-stream-prefix']}/{container_name}/{task_id}" # pyright: ignore (reportTypedDictNotRequiredAccess)
return {"log_group": log_group, "log_stream": log_stream}
else:
context.log.warning(
f"[pipes] Unsupported log driver {log_config['logDriver']} for Pipes container {container_name} in task {task_arn}. Dagster Pipes won't be able to read CloudWatch logs from this container."
)
else:
context.log.warning(
f"[pipes] log configuration for container {container_name} not found in task definition {task_definition}."
)
return None
try:
if (
isinstance(self._message_reader, PipesCloudWatchMessageReader)
and len(containers) > 1
and not pipes_container_name
):
raise DagsterInvariantViolationError(
"When using PipesCloudWatchMessageReader with more than one container, pipes_container_name must be set."
)
elif (
isinstance(self._message_reader, PipesCloudWatchMessageReader)
and len(containers) == 1
):
pipes_container_name = containers[0]["name"] # pyright: ignore (reportTypedDictNotRequiredAccess)
if isinstance(self._message_reader, PipesCloudWatchMessageReader):
pipes_container_name = cast("str", pipes_container_name)
params = get_cloudwatch_params(pipes_container_name)
if params:
# update log group and stream for the message reader
# it should start receiving messages shortly after this call
session.report_launched({"extras": params})
# collect logs from all containers
# TODO: insert container names into the log message
# right now all logs will be mixed together, which is not very good
for container in containers:
container_name = container["name"] # pyright: ignore (reportTypedDictNotRequiredAccess)
if isinstance(self._message_reader, PipesCloudWatchMessageReader):
params = get_cloudwatch_params(container_name)
if params:
self._message_reader.add_log_reader(
PipesCloudWatchLogReader(
client=self._message_reader.client,
log_group=params["log_group"],
log_stream=params["log_stream"],
start_time=int(session.created_at.timestamp() * 1000),
debug_info=f"reader for container {container_name}",
),
)
response = self._wait_for_completion(
response, cluster=cluster, waiter_config=waiter_config
)
# check for failed containers
failed_containers = {}
for task in response["tasks"]:
for container in task["containers"]: # pyright: ignore (reportTypedDictNotRequiredAccess)
if container.get("exitCode") not in (0, None):
failed_containers[container["runtimeId"]] = container.get("exitCode") # pyright: ignore (reportTypedDictNotRequiredAccess)
if failed_containers:
raise RuntimeError(
f"Some ECS containers finished with non-zero exit code:\n{pformat(list(failed_containers.keys()))}"
)
except DagsterExecutionInterruptedError:
if self.forward_termination:
context.log.warning(
"[pipes] Dagster process interrupted, terminating ECS tasks"
)
self._terminate(context=context, wait_response=response, cluster=cluster)
raise
context.log.info(f"[pipes] ECS task {task_arn} completed")
return PipesClientCompletedInvocation(
session, metadata=self._extract_dagster_metadata(response)
)
def _wait_for_completion(
self,
start_response: "RunTaskResponseTypeDef",
cluster: Optional[str] = None,
waiter_config: Optional[WaiterConfig] = None,
) -> "DescribeTasksResponseTypeDef":
waiter = self._client.get_waiter("tasks_stopped")
params: dict[str, Any] = {"tasks": [start_response["tasks"][0]["taskArn"]]} # pyright: ignore (reportGeneralTypeIssues)
if cluster:
params["cluster"] = cluster
waiter_params = {"WaiterConfig": waiter_config, **params} if waiter_config else params
waiter.wait(**waiter_params)
return self._client.describe_tasks(**params)
def _extract_dagster_metadata(
self, response: "DescribeTasksResponseTypeDef"
) -> RawMetadataMapping:
metadata: RawMetadataMapping = {}
region = self._client.meta.region_name
task = response["tasks"][0]
task_id = task["taskArn"].split("/")[-1] # pyright: ignore (reportTypedDictNotRequiredAccess)
cluster = task["clusterArn"].split("/")[-1] # pyright: ignore (reportTypedDictNotRequiredAccess)
metadata["AWS ECS Task URL"] = MetadataValue.url(
f"https://{region}.console.aws.amazon.com/ecs/v2/clusters/{cluster}/tasks/{task_id}"
)
return metadata
def _terminate(
self,
context: Union[OpExecutionContext, AssetExecutionContext],
wait_response: "DescribeTasksResponseTypeDef",
cluster: Optional[str] = None,
):
task = wait_response["tasks"][0]
try:
self._client.stop_task(
cluster=cluster, # pyright: ignore ()
task=wait_response["tasks"][0]["taskArn"], # pyright: ignore (reportGeneralTypeIssues)
reason="Dagster process was interrupted",
)
except botocore.exceptions.ClientError as e: # pyright: ignore (reportAttributeAccessIssue)
context.log.warning(f"[pipes] Couldn't stop ECS task {task} in cluster {cluster}:\n{e}")
| PipesECSClient |
python | spack__spack | lib/spack/spack/installer.py | {
"start": 4544,
"end": 5382
} | class ____:
def __init__(self, pkg_count: int):
# Counters used for showing status information
self.pkg_num: int = 0
self.pkg_count: int = pkg_count
self.pkg_ids: Set[str] = set()
def next_pkg(self, pkg: "spack.package_base.PackageBase"):
pkg_id = package_id(pkg.spec)
if pkg_id not in self.pkg_ids:
self.pkg_num += 1
self.pkg_ids.add(pkg_id)
def set_term_title(self, text: str):
if not spack.config.get("config:install_status", True):
return
if not sys.stdout.isatty():
return
status = f"{text} {self.get_progress()}"
sys.stdout.write(f"\x1b]0;Spack: {status}\x07")
sys.stdout.flush()
def get_progress(self) -> str:
return f"[{self.pkg_num}/{self.pkg_count}]"
| InstallStatus |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E23.py | {
"start": 2830,
"end": 2930
} | class ____[A: object="foo"[::-1], B: object =[[["foo", "bar"]]], C: object= bytes]:
pass
| PEP696Good |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint.py | {
"start": 16368,
"end": 29107
} | class ____:
"""Keeps the status of a name-based checkpoint restore."""
def __init__(self, save_path, dtype_map=None):
self.save_path = save_path
self.dtype_map = dtype_map
# A map from trackable objects to unused attribute names. We don't have
# proto IDs when doing a name-based restore, so the map keys differ from
# those in _CheckpointRestoreCoordinator.
self.unused_attributes = object_identity.ObjectIdentityWeakKeyDictionary()
self.restore_uid = ops.uid()
def globally_named_object_attributes(self, trackable):
"""Create globally named SaveableObjects from attributes.
If an object's attribute has no global name specified (default construction
for the SaveableObject factory), records the failure in
`self.unused_attributes` (which can then be used to make status assertions
fail; see `NameBasedSaverStatus`).
Args:
trackable: An object to save.
Yields:
SaveableObjects for `trackable`'s attributes.
"""
for (
attribute_name,
saveable_factory,
) in saveable_object_util.saveable_objects_from_trackable(
trackable, tf1_saver=True,
).items():
if callable(saveable_factory):
try:
# This saveable object factory does not have a default name= argument,
# which means there's no way to save/restore it using a name-based
# checkpoint. Ignore the error now and make sure assert_consumed()
# fails.
saveable = saveable_factory()
except TypeError:
self.unused_attributes.setdefault(trackable,
[]).append(attribute_name)
continue
else:
saveable = saveable_factory
names_to_saveables = saveable_object_util.op_list_to_dict(
[saveable], convert_variable_to_tensor=False)
for name, op in names_to_saveables.items():
for saveable_object in saveable_object_util.saveable_objects_for_op(
op=op, name=name):
yield saveable_object
def eager_restore(self, trackable):
"""Runs restore ops for `trackable`'s attributes."""
# When graph building, we don't add any restore ops to the graph until
# run_restore_ops/initialize_or_restore on the status object for name-based
# checkpoints.
assert context.executing_eagerly()
for saveable in self.globally_named_object_attributes(trackable):
restored_tensors = []
tensor_missing = False
for spec in saveable.specs:
if spec.name in self.dtype_map:
with ops.device("cpu:0"):
restored, = io_ops.restore_v2(
prefix=self.save_path,
tensor_names=[spec.name],
shape_and_slices=[""],
dtypes=[self.dtype_map[spec.name]],
name="%s_checkpoint_read" % (spec.name,))
restored_tensors.append(array_ops.identity(restored))
else:
tensor_missing = True
if tensor_missing:
# Record that this variable didn't match so assertions will fail.
self.unused_attributes.setdefault(trackable, []).append(saveable.name)
else:
# Ignores values missing from the checkpoint, as with object-based
# restore. Status assertions can be used to check exact matches,
# although it's unlikely to ever happen for name-based checkpoints.
saveable.restore(
restored_tensors=restored_tensors, restored_shapes=None)
# TODO(allenl): If this ends up in a public API, consider adding LINT.If Change
# or consolidating the implementation with get_variable.
def _default_getter(name,
shape,
dtype,
initializer=None,
partition_info=None,
**kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name,
shape=shape_object,
dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
shape_list = None if shape is None else shape_object.as_list()
if "partition_info" in tf_inspect.getargspec(initializer).args:
initial_value = functools.partial(initializer,
shape_list,
dtype=dtype,
partition_info=partition_info)
else:
initial_value = functools.partial(initializer,
shape_list,
dtype=dtype)
return variable_v1.VariableV1(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
use_resource=True,
**kwargs)
def add_variable(trackable,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
trainable=True):
"""Add a variable to a Trackable with no scope influence."""
return trackable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
getter=_default_getter,
trainable=trainable)
def object_metadata(save_path):
"""Retrieves information about the objects in a checkpoint.
Example usage:
```python
object_graph = tf.contrib.checkpoint.object_metadata(
tf.train.latest_checkpoint(checkpoint_directory))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
```
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`.
Returns:
A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.
Raises:
ValueError: If an object graph was not found in the checkpoint.
"""
reader = py_checkpoint_reader.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
raise ValueError(
f"The specified checkpoint \"{save_path}\" does not appear to be "
"object-based (saved with TF2) since it is missing the key "
f"\"{base.OBJECT_GRAPH_PROTO_KEY}\". Likely it was created with the "
"TF1 name-based saver and does not contain an object dependency graph.")
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
return object_graph_proto
def list_objects(root_trackable):
"""Traverse the object graph and list all accessible objects.
Looks for `Trackable` objects which are dependencies of
`root_trackable`. Includes slot variables only if the variable they are
slotting for and the optimizer are dependencies of `root_trackable`
(i.e. if they would be saved with a checkpoint).
Args:
root_trackable: A `Trackable` object whose dependencies should be flattened.
Returns:
A flat list of objects.
"""
return util.list_objects(graph_view_lib.ObjectGraphView(root_trackable))
def gather_initializers(root_trackable):
"""Traverse the object graph and find initialization ops.
Looks for `Trackable` objects which are dependencies of
`root_trackable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_trackable` (i.e. if they would be
saved with a checkpoint).
Args:
root_trackable: A `Trackable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
trackable_objects = list_objects(root_trackable)
return [
c.initializer
for c in trackable_objects
if hasattr(c, "initializer") and c.initializer is not None
]
@tf_contextlib.contextmanager
def capture_dependencies(template):
"""Capture variables created within this scope as `Template` dependencies.
Requires that `template.variable_scope` is active.
This scope is intended as a compatibility measure, allowing a trackable
object to add dependencies on variables created in a block of code which is
not aware of object-based saving (and instead uses variable names
heavily). This is how `Template` objects add dependencies on variables and
sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly.
Args:
template: The `Template` object to register dependencies with.
Yields:
None (when used as a context manager).
"""
name_prefix = template.variable_scope.name
def _trackable_custom_creator(next_creator,
name,
initial_value,
trackable_parent=None,
**kwargs):
"""A variable creation hook which adds Trackable dependencies.
Set for example during a `Template`'s first wrapped function
execution. Ensures that (a) `template` depends on any trackable
objects using their own `capture_dependencies` scope inside this scope which
create variables, and (b) that any variables not in a more deeply nested
scope are added as dependencies directly.
The `trackable_parent` argument is passed between custom creators but
ignored when the variable object itself is created. This argument indicates
(if not `None`) that a more deeply nested scope has already added the
variable as a dependency, and that parent scopes should add a dependency on
that object rather than on the variable directly.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
name: The (full, scope-influenced) name of the variable. The `name_prefix`
itself is stripped for the purposes of object-based dependency tracking,
but scopes opened within this scope are respected.
initial_value: See `variable_scope.variable_creator_scope`. Taken
explicitly so the argument can be re-named and used with
`Trackable._add_variable_with_custom_getter`.
trackable_parent: If not None, a more deeply nested trackable object and
its name prefix which were passed to `capture_dependencies` to add a
dependency on (rather than depending on the variable directly).
**kwargs: Passed through to the next creator.
Returns:
The output of `next_creator`: the fetched/created variable object.
"""
def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):
inner_kwargs.pop("name") # Ignored; this is the scope-stripped name which
# we don't want to propagate.
return next_creator(initial_value=initializer, name=name, **inner_kwargs)
if name is not None and name.startswith(name_prefix):
scope_stripped_name = name[len(name_prefix) + 1:]
if not trackable_parent:
return template._add_variable_with_custom_getter( # pylint: disable=protected-access
initializer=initial_value,
name=scope_stripped_name,
getter=_call_next_creator_renaming_initializer,
# Disable error checking for Trackable. Exceptions are instead
# raised if necessary when the object-based saver tries to
# save/restore the object.
overwrite=True,
trackable_parent=(template, name_prefix),
**kwargs)
else:
parent_object, parent_name_prefix = trackable_parent
template._track_trackable( # pylint: disable=protected-access
parent_object,
name=parent_name_prefix[len(name_prefix) + 1:],
overwrite=True)
return next_creator(
name=name,
initial_value=initial_value,
trackable_parent=(template, name_prefix),
**kwargs)
with variable_scope.variable_creator_scope(_trackable_custom_creator):
yield
| _NameBasedRestoreCoordinator |
python | PyCQA__pylint | tests/functional/a/abstract/abstract_class_instantiated.py | {
"start": 856,
"end": 1147
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __iter__(self):
pass
@abc.abstractmethod
def __len__(self):
pass
@abc.abstractmethod
def __contains__(self, _):
pass
@abc.abstractmethod
def __hash__(self):
pass
| Structure |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws_tests/defs_state_storage_tests/test_defs_state_storage.py | {
"start": 979,
"end": 1968
} | class ____(TestDefsStateStorage):
"""Tests the blob storage state storage implementation."""
__test__ = True
@pytest.fixture(name="storage", scope="function")
def state_storage(self, mock_s3_bucket):
with instance_for_test(
overrides={
"defs_state_storage": {
"module": "dagster._core.storage.defs_state.blob_storage_state_storage",
"class": "UPathDefsStateStorage",
"config": {
"base_path": f"s3://{mock_s3_bucket.name}/foo",
"storage_options": _MOCK_STORAGE_OPTIONS,
},
}
}
) as instance:
state_storage = check.inst(instance.defs_state_storage, UPathDefsStateStorage)
# Check that we have an S3 filesystem
assert state_storage.base_path.fs.__class__.__name__ == "S3FileSystem"
yield state_storage
| TestS3UPathDefsStateStorage |
python | PyCQA__pylint | doc/data/messages/i/invalid-length-hint-returned/good.py | {
"start": 0,
"end": 121
} | class ____:
"""__length_hint__ returns <type 'int'>"""
def __length_hint__(self):
return 10
| CustomLengthHint |
python | crytic__slither | slither/core/solidity_types/type_alias.py | {
"start": 433,
"end": 1168
} | class ____(Type):
def __init__(self, underlying_type: ElementaryType, name: str) -> None:
super().__init__()
self.name = name
self.underlying_type = underlying_type
self._pattern = "type"
@property
def type(self) -> ElementaryType:
"""
Return the underlying type. Alias for underlying_type
Returns:
Type: the underlying type
"""
return self.underlying_type
@property
def storage_size(self) -> Tuple[int, bool]:
return self.underlying_type.storage_size
def __hash__(self) -> int:
return hash(str(self))
@property
def is_dynamic(self) -> bool:
return self.underlying_type.is_dynamic
| TypeAlias |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 40864,
"end": 41057
} | class ____(Sky2PixProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
| Sky2Pix_HEALPixPolar |
python | python__mypy | mypy/test/testutil.py | {
"start": 196,
"end": 915
} | class ____(TestCase):
def test_get_terminal_size_in_pty_defaults_to_80(self) -> None:
# when run using a pty, `os.get_terminal_size()` returns `0, 0`
ret = os.terminal_size((0, 0))
mock_environ = os.environ.copy()
mock_environ.pop("COLUMNS", None)
with mock.patch.object(os, "get_terminal_size", return_value=ret):
with mock.patch.dict(os.environ, values=mock_environ, clear=True):
assert get_terminal_width() == 80
def test_parse_location_windows(self) -> None:
assert parse_location(r"C:\test.py:1:1") == (r"C:\test.py", [1, 1])
assert parse_location(r"C:\test.py:1:1:1:1") == (r"C:\test.py", [1, 1, 1, 1])
| TestGetTerminalSize |
python | Netflix__metaflow | metaflow/decorators.py | {
"start": 8129,
"end": 10401
} | class ____(Decorator):
options = {}
def __init__(self, *args, **kwargs):
super(FlowDecorator, self).__init__(*args, **kwargs)
def flow_init(
self, flow, graph, environment, flow_datastore, metadata, logger, echo, options
):
"""
Called when all decorators have been created for this flow.
"""
pass
def get_top_level_options(self):
"""
Return a list of option-value pairs that correspond to top-level
options that should be passed to subprocesses (tasks). The option
names should be a subset of the keys in self.options.
If the decorator has a non-empty set of options in `self.options`, you
probably want to return the assigned values in this method.
"""
return []
# compare this to parameters.add_custom_parameters
def add_decorator_options(cmd):
flow_cls = getattr(current_flow, "flow_cls", None)
if flow_cls is None:
return cmd
seen = {}
existing_params = set(p.name.lower() for p in cmd.params)
# Add decorator options
for deco in flow_decorators(flow_cls):
for option, kwargs in deco.options.items():
if option in seen:
msg = (
"Flow decorator '%s' uses an option '%s' which is also "
"used by the decorator '%s'. This is a bug in Metaflow. "
"Please file a ticket on GitHub."
% (deco.name, option, seen[option])
)
raise MetaflowInternalError(msg)
elif deco.name.lower() in existing_params:
raise MetaflowInternalError(
"Flow decorator '%s' uses an option '%s' which is a reserved "
"keyword. Please use a different option name." % (deco.name, option)
)
else:
kwargs["envvar"] = "METAFLOW_FLOW_%s" % option.upper()
seen[option] = deco.name
cmd.params.insert(0, click.Option(("--" + option,), **kwargs))
return cmd
def flow_decorators(flow_cls):
return [
d
for deco_list in flow_cls._flow_state[FlowStateItems.FLOW_DECORATORS].values()
for d in deco_list
]
| FlowDecorator |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 58611,
"end": 58806
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
task_key: Optional[str] = None
| TaskDependency |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 585,
"end": 1532
} | class ____(BaseClass):
def normal(self):
super(MyClass, self).f() # can use super()
super().f()
def different_argument(self, other):
super(MyClass, other).f() # CANNOT use super()
def comprehension_scope(self):
[super(MyClass, self).f() for x in [1]] # CANNOT use super()
def inner_functions(self):
def outer_argument():
super(MyClass, self).f() # CANNOT use super()
def inner_argument(self):
super(MyClass, self).f() # can use super()
super().f()
outer_argument()
inner_argument(self)
def inner_class(self):
class InnerClass:
super(MyClass, self).f() # CANNOT use super()
def method(inner_self):
super(MyClass, self).f() # CANNOT use super()
InnerClass().method()
defined_outside = defined_outside
from dataclasses import dataclass
@dataclass
| MyClass |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 2613,
"end": 2735
} | class ____(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
DynamicAliasModel(y='y', z='1')
| DynamicAliasModel |
python | django__django | tests/runtests.py | {
"start": 11937,
"end": 27963
} | class ____(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
try:
import selenium # NOQA
except ImportError as e:
raise ImproperlyConfigured(f"Error loading selenium module: {e}")
browsers = values.split(",")
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(
self, "Selenium browser specification '%s' is not valid." % browser
)
setattr(namespace, self.dest, browsers)
def django_tests(
verbosity,
interactive,
failfast,
keepdb,
reverse,
test_labels,
debug_sql,
parallel,
tags,
exclude_tags,
test_name_patterns,
start_at,
start_after,
pdb,
buffer,
timing,
shuffle,
durations=None,
):
if parallel in {0, "auto"}:
max_parallel = get_max_test_processes()
else:
max_parallel = parallel
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(
django.__file__
)
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
process_setup_args = (verbosity, start_at, start_after, test_labels)
test_labels, state = setup_run_tests(*process_setup_args)
# Run the test suite, including the extra validation tests.
if not hasattr(settings, "TEST_RUNNER"):
settings.TEST_RUNNER = "django.test.runner.DiscoverRunner"
if parallel in {0, "auto"}:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
parallel = max_parallel
else:
parallel = 1
TestRunner = get_runner(settings)
TestRunner.parallel_test_suite.process_setup = setup_run_tests
TestRunner.parallel_test_suite.process_setup_args = process_setup_args
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=parallel,
tags=tags,
exclude_tags=exclude_tags,
test_name_patterns=test_name_patterns,
pdb=pdb,
buffer=buffer,
timing=timing,
shuffle=shuffle,
durations=durations,
)
failures = test_runner.run_tests(test_labels)
teardown_run_tests(state)
return failures
def collect_test_modules(start_at, start_after):
test_modules, state = setup_collect_tests(start_at, start_after)
teardown_collect_tests(state)
return test_modules
def get_subprocess_args(options):
subprocess_args = [sys.executable, __file__, "--settings=%s" % options.settings]
if options.failfast:
subprocess_args.append("--failfast")
if options.verbosity:
subprocess_args.append("--verbosity=%s" % options.verbosity)
if not options.interactive:
subprocess_args.append("--noinput")
if options.tags:
subprocess_args.append("--tag=%s" % options.tags)
if options.exclude_tags:
subprocess_args.append("--exclude_tag=%s" % options.exclude_tags)
if options.shuffle is not False:
if options.shuffle is None:
subprocess_args.append("--shuffle")
else:
subprocess_args.append("--shuffle=%s" % options.shuffle)
return subprocess_args
def bisect_tests(bisection_label, options, test_labels, start_at, start_after):
if not test_labels:
test_labels = collect_test_modules(start_at, start_after)
print("***** Bisecting test suite: %s" % " ".join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, "model_inheritance_same_model_name"]:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print("***** Pass %da: Running the first half of the test suite" % iteration)
print("***** Test labels: %s" % " ".join(test_labels_a))
failures_a = subprocess.run(subprocess_args + test_labels_a)
print("***** Pass %db: Running the second half of the test suite" % iteration)
print("***** Test labels: %s" % " ".join(test_labels_b))
print("")
failures_b = subprocess.run(subprocess_args + test_labels_b)
if failures_a.returncode and not failures_b.returncode:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b.returncode and not failures_a.returncode:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a.returncode and failures_b.returncode:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
def paired_tests(paired_test, options, test_labels, start_at, start_after):
if not test_labels:
test_labels = collect_test_modules(start_at, start_after)
print("***** Trying paired execution")
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, "model_inheritance_same_model_name"]:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print(
"***** %d of %d: Check test pairing with %s"
% (i + 1, len(test_labels), label)
)
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print("***** Found problem pair with %s" % label)
return
print("***** No problem pair found")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
"modules",
nargs="*",
metavar="module",
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
"-v",
"--verbosity",
default=1,
type=int,
choices=[0, 1, 2, 3],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output",
)
parser.add_argument(
"--noinput",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--failfast",
action="store_true",
help="Tells Django to stop running the test suite after first failed test.",
)
parser.add_argument(
"--keepdb",
action="store_true",
help="Tells Django to preserve the test database between runs.",
)
parser.add_argument(
"--settings",
help='Python path to settings module, e.g. "myproject.settings". If '
"this isn't provided, either the DJANGO_SETTINGS_MODULE "
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
"--bisect",
help="Bisect the test suite to discover a test that causes a test "
"failure when combined with the named test.",
)
parser.add_argument(
"--pair",
help="Run the test suite in pairs with the named test to find problem pairs.",
)
parser.add_argument(
"--shuffle",
nargs="?",
default=False,
type=int,
metavar="SEED",
help=(
"Shuffle the order of test cases to help check that tests are "
"properly isolated."
),
)
parser.add_argument(
"--reverse",
action="store_true",
help="Sort test suites and test cases in opposite order to debug "
"test side effects not apparent with normal execution lineup.",
)
parser.add_argument(
"--selenium",
action=ActionSelenium,
metavar="BROWSERS",
help="A comma-separated list of browsers to run the Selenium tests against.",
)
parser.add_argument(
"--screenshots",
action="store_true",
help="Take screenshots during selenium tests to capture the user interface.",
)
parser.add_argument(
"--headless",
action="store_true",
help="Run selenium tests in headless mode, if the browser supports the option.",
)
parser.add_argument(
"--selenium-hub",
help="A URL for a selenium hub instance to use in combination with --selenium.",
)
parser.add_argument(
"--external-host",
default=socket.gethostname(),
help=(
"The external host that can be reached by the selenium hub instance when "
"running Selenium tests via Selenium Hub."
),
)
parser.add_argument(
"--debug-sql",
action="store_true",
help="Turn on the SQL query logger within tests.",
)
# 0 is converted to "auto" or 1 later on, depending on a method used by
# multiprocessing to start subprocesses and on the backend support for
# cloning databases.
parser.add_argument(
"--parallel",
nargs="?",
const="auto",
default=0,
type=parallel_type,
metavar="N",
help=(
'Run tests using up to N parallel processes. Use the value "auto" '
"to run one test process for each processor core."
),
)
parser.add_argument(
"--tag",
dest="tags",
action="append",
help="Run only tests with the specified tags. Can be used multiple times.",
)
parser.add_argument(
"--exclude-tag",
dest="exclude_tags",
action="append",
help="Do not run tests with the specified tag. Can be used multiple times.",
)
parser.add_argument(
"--start-after",
dest="start_after",
help="Run tests starting after the specified top-level module.",
)
parser.add_argument(
"--start-at",
dest="start_at",
help="Run tests starting at the specified top-level module.",
)
parser.add_argument(
"--pdb", action="store_true", help="Runs the PDB debugger on error or failure."
)
parser.add_argument(
"-b",
"--buffer",
action="store_true",
help="Discard output of passing tests.",
)
parser.add_argument(
"--timing",
action="store_true",
help="Output timings, including database set up and total run time.",
)
parser.add_argument(
"-k",
dest="test_name_patterns",
action="append",
help=(
"Only run test methods and classes matching test name pattern. "
"Same as unittest -k option. Can be used multiple times."
),
)
parser.add_argument(
"--durations",
dest="durations",
type=int,
default=None,
metavar="N",
help="Show the N slowest test cases (N=0 for all).",
)
options = parser.parse_args()
using_selenium_hub = options.selenium and options.selenium_hub
if options.selenium_hub and not options.selenium:
parser.error(
"--selenium-hub and --external-host require --selenium to be used."
)
if using_selenium_hub and not options.external_host:
parser.error("--selenium-hub and --external-host must be used together.")
if options.screenshots and not options.selenium:
parser.error("--screenshots require --selenium to be used.")
if options.screenshots and options.tags:
parser.error("--screenshots and --tag are mutually exclusive.")
# Allow including a trailing slash on app_labels for tab completion
# convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
mutually_exclusive_options = [
options.start_at,
options.start_after,
options.modules,
]
enabled_module_options = [
bool(option) for option in mutually_exclusive_options
].count(True)
if enabled_module_options > 1:
print(
"Aborting: --start-at, --start-after, and test labels are mutually "
"exclusive."
)
sys.exit(1)
for opt_name in ["start_at", "start_after"]:
opt_val = getattr(options, opt_name)
if opt_val:
if "." in opt_val:
print(
"Aborting: --%s must be a top-level module."
% opt_name.replace("_", "-")
)
sys.exit(1)
setattr(options, opt_name, os.path.normpath(opt_val))
if options.settings:
os.environ["DJANGO_SETTINGS_MODULE"] = options.settings
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_sqlite")
options.settings = os.environ["DJANGO_SETTINGS_MODULE"]
if options.selenium:
if (
multiprocessing.get_start_method() in {"spawn", "forkserver"}
and options.parallel != 1
):
parser.error(
"You cannot use --selenium with parallel tests on this system. "
"Pass --parallel=1 to use --selenium."
)
if not options.tags:
options.tags = ["selenium"]
elif "selenium" not in options.tags:
options.tags.append("selenium")
if options.selenium_hub:
SeleniumTestCaseBase.selenium_hub = options.selenium_hub
SeleniumTestCaseBase.external_host = options.external_host
SeleniumTestCaseBase.headless = options.headless
SeleniumTestCaseBase.browsers = options.selenium
if options.screenshots:
options.tags = ["screenshot"]
SeleniumTestCase.screenshots = options.screenshots
if options.bisect:
bisect_tests(
options.bisect,
options,
options.modules,
options.start_at,
options.start_after,
)
elif options.pair:
paired_tests(
options.pair,
options,
options.modules,
options.start_at,
options.start_after,
)
else:
time_keeper = TimeKeeper() if options.timing else NullTimeKeeper()
with time_keeper.timed("Total run"):
failures = django_tests(
options.verbosity,
options.interactive,
options.failfast,
options.keepdb,
options.reverse,
options.modules,
options.debug_sql,
options.parallel,
options.tags,
options.exclude_tags,
options.test_name_patterns,
options.start_at,
options.start_after,
options.pdb,
options.buffer,
options.timing,
options.shuffle,
getattr(options, "durations", None),
)
time_keeper.print_results()
if failures:
sys.exit(1)
| ActionSelenium |
python | scipy__scipy | scipy/_lib/_testutils.py | {
"start": 1202,
"end": 3935
} | class ____:
"""
Run tests for this namespace
``scipy.test()`` runs tests for all of SciPy, with the default settings.
When used from a submodule (e.g., ``scipy.cluster.test()``, only the tests
for that namespace are run.
Parameters
----------
label : {'fast', 'full'}, optional
Whether to run only the fast tests, or also those marked as slow.
Default is 'fast'.
verbose : int, optional
Test output verbosity. Default is 1.
extra_argv : list, optional
Arguments to pass through to Pytest.
doctests : bool, optional
Whether to run doctests or not. Default is False.
coverage : bool, optional
Whether to run tests with code coverage measurements enabled.
Default is False.
tests : list of str, optional
List of module names to run tests for. By default, uses the module
from which the ``test`` function is called.
parallel : int, optional
Run tests in parallel with pytest-xdist, if number given is larger than
1. Default is 1.
"""
def __init__(self, module_name):
self.module_name = module_name
def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
coverage=False, tests=None, parallel=None):
import pytest
module = sys.modules[self.module_name]
module_path = os.path.abspath(module.__path__[0])
pytest_args = ['--showlocals', '--tb=short']
if extra_argv is None:
extra_argv = []
pytest_args += extra_argv
if any(arg == "-m" or arg == "--markers" for arg in extra_argv):
# Likely conflict with default --mode=fast
raise ValueError("Must specify -m before --")
if verbose and int(verbose) > 1:
pytest_args += ["-" + "v"*(int(verbose)-1)]
if coverage:
pytest_args += ["--cov=" + module_path]
if label == "fast":
pytest_args += ["-m", "not slow"]
elif label != "full":
pytest_args += ["-m", label]
if tests is None:
tests = [self.module_name]
if parallel is not None and parallel > 1:
if _pytest_has_xdist():
pytest_args += ['-n', str(parallel)]
else:
import warnings
warnings.warn('Could not run tests in parallel because '
'pytest-xdist plugin is not available.',
stacklevel=2)
pytest_args += ['--pyargs'] + list(tests)
try:
code = pytest.main(pytest_args)
except SystemExit as exc:
code = exc.code
return (code == 0)
| PytestTester |
python | PyCQA__pylint | tests/functional/a/attribute_defined_outside_init.py | {
"start": 376,
"end": 466
} | class ____(A):
def test(self):
self.z = 44 # [attribute-defined-outside-init]
| B |
python | getsentry__sentry | tests/sentry/models/test_release.py | {
"start": 47874,
"end": 56460
} | class ____(TestCase):
def setUp(self) -> None:
self.org = self.create_organization()
self.fake_package = "_fake_package_prj_"
# Project with 10 semver releases
self.proj_1 = self.create_project(organization=self.org)
for i in range(10):
self.create_release(version=f"fake_package-ahmed@1.1.{i}", project=self.proj_1)
def test_follows_semver_with_all_releases_semver_and_semver_release_version(self) -> None:
"""
Test that ensures that when the last 10 releases and the release version passed in as an arg
follow semver versioning, then True should be returned
"""
assert (
follows_semver_versioning_scheme(
org_id=self.org.id, project_id=self.proj_1.id, release_version="fake_package@2.0.0"
)
is True
)
def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(
self,
) -> None:
"""
Test that ensures that even if a project is following semver, then if the release_version
supplied lacks a package, then for that specific release we opt the project out of being
considered a semver project
"""
assert (
follows_semver_versioning_scheme(
org_id=self.org.id, project_id=self.proj_1.id, release_version="2.0.0"
)
is False
)
def test_follows_semver_with_all_releases_semver_and_no_release_version(self) -> None:
"""
Test that ensures that when the last 10 releases follow semver versioning and no release
version is passed in as an argument, then True should be returned
"""
assert (
follows_semver_versioning_scheme(org_id=self.org.id, project_id=self.proj_1.id) is True
)
def test_follows_semver_with_all_releases_semver_and_non_semver_release_version(self) -> None:
"""
Test that ensures that even if the last 10 releases follow semver but the passed in
release_version doesn't then we should return False because we should not follow semver
versioning in this case
"""
assert (
follows_semver_versioning_scheme(
org_id=self.org.id, project_id=self.proj_1.id, release_version="fizbuzz"
)
is False
)
def test_follows_semver_user_accidentally_stopped_using_semver_a_few_times(self) -> None:
"""
        Test that ensures that when a user accidentally stops using semver versioning a few
        times, but there exists at least one semver compliant release in the last 3 releases and
        at least 3 semver compliant releases in the last 10, then we still consider the
        project to be following semantic versioning
"""
proj = self.create_project(organization=self.org)
for i in range(2):
self.create_release(version=f"{self.fake_package}{proj.id}@1.{i}", project=proj)
for i in range(7):
self.create_release(version=f"foo release {i}", project=proj)
self.create_release(version=f"{self.fake_package}{proj.id}@1.9", project=proj)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is True
)
def test_follows_semver_user_stops_using_semver(self) -> None:
"""
Test that ensures that if a user stops using semver and so the last 3 releases in the last
10 releases are all non-semver releases, then the project does not follow semver anymore
        since the first condition, that at least one of the last 3 releases must be a semver
        release, is not satisfied
"""
proj = self.create_project(organization=self.org)
for i in range(7):
self.create_release(version=f"{self.fake_package}{proj.id}@1.{i}", project=proj)
for i in range(3):
self.create_release(version=f"helloworld {i}", project=proj)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is False
)
def test_follows_semver_user_accidentally_uses_semver_a_few_times(self) -> None:
"""
        Test that ensures that if a user accidentally uses semver compliant versions a few
        times, then the project will not be considered to be using semver
"""
proj = self.create_project(organization=self.org)
for i in range(8):
self.create_release(version=f"foo release {i}", project=proj)
for i in range(2):
self.create_release(version=f"{self.fake_package}{proj.id}@1.{i}", project=proj)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is False
)
def test_follows_semver_user_starts_using_semver(self) -> None:
"""
Test that ensures if a user starts using semver by having at least the last 3 releases
using semver then we consider the project to be using semver
"""
proj = self.create_project(organization=self.org)
for i in range(7):
self.create_release(version=f"foo release {i}", project=proj)
for i in range(3):
self.create_release(version=f"{self.fake_package}{proj.id}@1.{i}", project=proj)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is True
)
def test_follows_semver_user_starts_using_semver_with_less_than_10_recent_releases(
self,
) -> None:
"""
Test that ensures that a project with only 5 (<10) releases and at least one semver
release in the most recent releases is considered to be following semver
"""
proj = self.create_project(organization=self.org)
for i in range(4):
self.create_release(version=f"helloworld {i}", project=proj)
self.create_release(version=f"{self.fake_package}{proj.id}@1.0", project=proj)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is True
)
def test_follows_semver_check_when_project_only_has_two_releases(self) -> None:
"""
        Test that ensures that when a project has only two releases, we consider the project to
        be semver or not based on whether the most recent release follows semver
"""
# Case: User just started using semver
proj = self.create_project(organization=self.org)
self.create_release(version="helloworld 0", project=proj)
self.create_release(version=f"{self.fake_package}{proj.id}@1.0", project=proj)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is True
)
# Case: User just stopped using semver
proj_2 = self.create_project(organization=self.org)
self.create_release(version=f"{self.fake_package}{proj_2.id}@1.0", project=proj_2)
self.create_release(version="helloworld 1", project=proj_2)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj_2.id,
)
is False
)
def test_follows_semver_check_with_archived_non_semver_releases(self) -> None:
"""
Test that ensures that when a project has a mix of archived non-semver releases and active semver releases,
then we consider the project to be following semver.
"""
proj = self.create_project(organization=self.org)
# Create semver releases that are not archived
for i in range(4):
self.create_release(version=f"{self.fake_package}@1.0.{i}", project=proj)
# Create non-semver releases and archive them
for i in range(6):
release = self.create_release(version=f"notsemver-{i}", project=proj)
release.update(status=ReleaseStatus.ARCHIVED)
assert (
follows_semver_versioning_scheme(
org_id=self.org.id,
project_id=proj.id,
)
is True
)
| FollowsSemverVersioningSchemeTestCase |
python | pdm-project__pdm | src/pdm/cli/commands/add.py | {
"start": 694,
"end": 7131
} | class ____(BaseCommand):
"""Add package(s) to pyproject.toml and install them"""
arguments = (
*BaseCommand.arguments,
lockfile_option,
frozen_lockfile_option,
save_strategy_group,
override_option,
update_strategy_group,
prerelease_option,
unconstrained_option,
packages_group,
install_group,
dry_run_option,
venv_option,
skip_option,
)
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"-d",
"--dev",
default=False,
action="store_true",
help="Add packages into dev dependencies",
)
parser.add_argument("-G", "--group", help="Specify the target dependency group to add into")
parser.add_argument(
"--no-sync",
dest="sync",
default=True,
action="store_false",
help="Only write pyproject.toml and do not sync the working set",
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
if options.editables and options.no_editable:
raise PdmUsageError("`--no-editable` cannot be used with `-e/--editable`")
self.do_add(
project,
selection=GroupSelection.from_options(project, options),
sync=options.sync,
save=options.save_strategy or project.config["strategy.save"],
strategy=options.update_strategy or project.config["strategy.update"],
editables=options.editables,
packages=options.packages,
unconstrained=options.unconstrained,
no_editable=options.no_editable,
no_self=options.no_self,
dry_run=options.dry_run,
prerelease=options.prerelease,
fail_fast=options.fail_fast,
hooks=HookManager(project, options.skip),
)
@staticmethod
def do_add(
project: Project,
*,
selection: GroupSelection,
sync: bool = True,
save: str = "compatible",
strategy: str = "reuse",
editables: Collection[str] = (),
packages: Collection[str] = (),
unconstrained: bool = False,
no_editable: bool = False,
no_self: bool = False,
dry_run: bool = False,
prerelease: bool | None = None,
fail_fast: bool = False,
hooks: HookManager | None = None,
) -> None:
"""Add packages and install"""
from pdm.cli.actions import do_lock, do_sync
from pdm.cli.utils import check_project_file, save_version_specifiers
from pdm.models.requirements import parse_requirement
from pdm.models.specifiers import get_specifier
from pdm.utils import normalize_name
hooks = hooks or HookManager(project)
check_project_file(project)
if editables and no_editable:
raise PdmUsageError("Cannot use --no-editable with editable packages given.")
group = selection.one()
tracked_names: set[str] = set()
requirements: list[Requirement] = []
lock_groups = ["default"] if project.lockfile.empty() else project.lockfile.groups
if lock_groups is not None and group not in lock_groups:
if project.enable_write_lockfile:
project.core.ui.info(f"Adding group [success]{group}[/] to lockfile")
lock_groups.append(group)
if group == "default" or (
not selection.dev and normalize_name(group) not in project.pyproject.dev_dependencies
):
if editables:
raise PdmUsageError("Cannot add editables to the default or optional dependency group")
for r in [parse_requirement(line, True) for line in editables] + [parse_requirement(line) for line in packages]:
if project.is_distribution and normalize_name(name := project.name) == r.key and not r.extras:
project.core.ui.warn(f"Package [req]{name}[/] is the project itself.")
continue
if r.is_file_or_url:
r.relocate(project.backend) # type: ignore[attr-defined]
key = r.identify()
tracked_names.add(key)
requirements.append(r)
if requirements:
project.core.ui.echo(
f"Adding {'[bold]global[/] ' if project.is_global else ''}packages to [primary]{group}[/] "
f"{'dev-' if selection.dev else ''}dependencies: "
+ ", ".join(f"[req]{r.as_line()}[/]" for r in requirements)
)
project.add_dependencies(requirements, group, selection.dev or False, write=False)
all_dependencies = project.all_dependencies
group_deps = all_dependencies[group]
for req in group_deps:
if req.identify() in tracked_names:
req.prerelease = prerelease
if unconstrained:
if not requirements:
raise PdmUsageError("--unconstrained requires at least one package")
for req in group_deps:
req.specifier = get_specifier("")
reqs = [r for g, deps in all_dependencies.items() for r in deps if lock_groups is None or g in lock_groups]
with hooks.skipping("post_lock"):
resolved = do_lock(
project,
strategy,
tracked_names,
reqs,
dry_run=True,
hooks=hooks,
groups=lock_groups,
)
# Update dependency specifiers and lockfile hash.
deps_to_update = group_deps if unconstrained else requirements
save_version_specifiers(deps_to_update, resolved, save)
if not dry_run:
project.add_dependencies(deps_to_update, group, selection.dev or False)
project.write_lockfile(show_message=False)
hooks.try_emit("post_lock", resolution=resolved, dry_run=dry_run)
if sync:
do_sync(
project,
selection=GroupSelection(project, groups=[group], default=False),
no_editable=no_editable and tracked_names,
no_self=no_self or group != "default",
requirements=list(group_deps),
dry_run=dry_run,
fail_fast=fail_fast,
hooks=hooks,
)
| Command |
python | numpy__numpy | benchmarks/benchmarks/bench_random.py | {
"start": 5049,
"end": 5377
} | class ____(Benchmark):
params = [1e3, 1e6, 1e8]
def setup(self, v):
self.a = np.arange(v)
self.rng = np.random.default_rng()
def time_legacy_choice(self, v):
np.random.choice(self.a, 1000, replace=False)
def time_choice(self, v):
self.rng.choice(self.a, 1000, replace=False)
| Choice |
python | mkdocs__mkdocs | mkdocs/utils/yaml.py | {
"start": 1061,
"end": 1546
} | class ____(os.PathLike):
def __init__(self, config: MkDocsConfig, suffix: str = ''):
self.config = config
self.suffix = suffix
def value(self) -> str:
raise NotImplementedError
def __fspath__(self) -> str:
"""Can be used as a path."""
return os.path.join(self.value(), self.suffix)
def __str__(self) -> str:
"""Can be converted to a string to obtain the current class."""
return self.__fspath__()
| _DirPlaceholder |
python | astropy__astropy | astropy/visualization/lupton_rgb.py | {
"start": 8244,
"end": 9537
} | class ____(Mapping):
"""
A mapping for an asinh stretch (preserving colours independent of brightness).
x = asinh(Q (I - minimum)/stretch)/Q
This reduces to a linear stretch if Q == 0
See https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
"""
def __init__(self, minimum, stretch, Q=8):
Mapping.__init__(self, minimum)
# 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
epsilon = 1.0 / 2**23
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
frac = 0.1 # gradient estimated using frac*stretch is _slope
self._slope = frac * self._uint8Max / np.arcsinh(frac * Q)
self._soften = Q / float(stretch)
def map_intensity_to_uint8(self, I):
# n.b. np.where can't and doesn't short-circuit
with np.errstate(invalid="ignore", divide="ignore"):
return np.where(I <= 0, 0, np.arcsinh(I * self._soften) * self._slope / I)
| AsinhMapping |
python | getsentry__sentry | src/sentry/utils/committers.py | {
"start": 6168,
"end": 6270
} | class ____(TypedDict):
author: Author | None
commits: Sequence[tuple[Commit, int]]
| AuthorCommits |
python | mwaskom__seaborn | tests/test_base.py | {
"start": 10770,
"end": 15642
} | class ____:
def test_plotter_default_init(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
assert not hasattr(p, "_size_map")
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", size="a"),
)
assert isinstance(p._size_map, SizeMapping)
assert p._size_map.map_type == p.var_types["size"]
def test_plotter_customization(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", size="a"),
)
sizes = [1, 4, 2]
size_order = ["b", "a", "c"]
p.map_size(sizes=sizes, order=size_order)
assert p._size_map.lookup_table == dict(zip(size_order, sizes))
assert p._size_map.levels == size_order
def test_size_map_null(self, flat_series, null_series):
p = VectorPlotter(variables=dict(x=flat_series, size=null_series))
m = HueMapping(p)
assert m.levels is None
assert m.map_type is None
assert m.norm is None
assert m.lookup_table is None
def test_map_size_numeric(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", size="s"),
)
# Test default range of keys in the lookup table values
m = SizeMapping(p)
size_values = m.lookup_table.values()
value_range = min(size_values), max(size_values)
assert value_range == p._default_size_range
# Test specified range of size values
sizes = 1, 5
m = SizeMapping(p, sizes=sizes)
size_values = m.lookup_table.values()
assert min(size_values), max(size_values) == sizes
# Test size values with normalization range
norm = 1, 10
m = SizeMapping(p, sizes=sizes, norm=norm)
normalize = mpl.colors.Normalize(*norm, clip=True)
for key, val in m.lookup_table.items():
assert val == sizes[0] + (sizes[1] - sizes[0]) * normalize(key)
# Test size values with normalization object
norm = mpl.colors.LogNorm(1, 10, clip=False)
m = SizeMapping(p, sizes=sizes, norm=norm)
assert m.norm.clip
for key, val in m.lookup_table.items():
assert val == sizes[0] + (sizes[1] - sizes[0]) * norm(key)
# Test bad sizes argument
with pytest.raises(ValueError):
SizeMapping(p, sizes="bad_sizes")
# Test bad sizes argument
with pytest.raises(ValueError):
SizeMapping(p, sizes=(1, 2, 3))
# Test bad norm argument
with pytest.raises(ValueError):
SizeMapping(p, norm="bad_norm")
def test_map_size_categorical(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", size="a"),
)
# Test specified size order
levels = p.plot_data["size"].unique()
sizes = [1, 4, 6]
order = [levels[1], levels[2], levels[0]]
m = SizeMapping(p, sizes=sizes, order=order)
assert m.lookup_table == dict(zip(order, sizes))
# Test list of sizes
order = categorical_order(p.plot_data["size"])
sizes = list(np.random.rand(len(levels)))
m = SizeMapping(p, sizes=sizes)
assert m.lookup_table == dict(zip(order, sizes))
# Test dict of sizes
sizes = dict(zip(levels, np.random.rand(len(levels))))
m = SizeMapping(p, sizes=sizes)
assert m.lookup_table == sizes
# Test specified size range
sizes = (2, 5)
m = SizeMapping(p, sizes=sizes)
values = np.linspace(*sizes, len(m.levels))[::-1]
assert m.lookup_table == dict(zip(m.levels, values))
# Test explicit categories
p = VectorPlotter(data=long_df, variables=dict(x="x", size="a_cat"))
m = SizeMapping(p)
assert m.levels == long_df["a_cat"].cat.categories.to_list()
assert m.map_type == "categorical"
# Test sizes list with wrong length
sizes = list(np.random.rand(len(levels) + 1))
with pytest.warns(UserWarning):
SizeMapping(p, sizes=sizes)
# Test sizes dict with missing levels
sizes = dict(zip(levels, np.random.rand(len(levels) - 1)))
with pytest.raises(ValueError):
SizeMapping(p, sizes=sizes)
# Test bad sizes argument
with pytest.raises(ValueError):
SizeMapping(p, sizes="bad_size")
def test_array_palette_deprecation(self, long_df):
p = VectorPlotter(long_df, {"y": "y", "hue": "s"})
pal = mpl.cm.Blues([.3, .5, .8])[:, :3]
with pytest.warns(UserWarning, match="Numpy array is not a supported type"):
m = HueMapping(p, pal)
assert m.palette == pal.tolist()
| TestSizeMapping |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/dataplex.py | {
"start": 1139,
"end": 4851
} | class ____(BaseTrigger):
"""
    DataplexDataQualityJobTrigger runs on the trigger worker and waits for the job to reach the `SUCCEEDED` state.
:param job_id: Optional. The ID of a Dataplex job.
:param data_scan_id: Required. DataScan identifier.
:param project_id: Google Cloud Project where the job is running.
:param region: The ID of the Google Cloud region that the job belongs to.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param polling_interval_seconds: polling period in seconds to check for the status.
"""
def __init__(
self,
job_id: str | None,
data_scan_id: str,
project_id: str | None,
region: str,
gcp_conn_id: str = "google_cloud_default",
polling_interval_seconds: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.job_id = job_id
self.data_scan_id = data_scan_id
self.project_id = project_id
self.region = region
self.gcp_conn_id = gcp_conn_id
self.polling_interval_seconds = polling_interval_seconds
self.impersonation_chain = impersonation_chain
def serialize(self):
return (
"airflow.providers.google.cloud.triggers.dataplex.DataplexDataQualityJobTrigger",
{
"job_id": self.job_id,
"data_scan_id": self.data_scan_id,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
hook = DataplexAsyncHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
while True:
job = await hook.get_data_scan_job(
project_id=self.project_id,
region=self.region,
job_id=self.job_id,
data_scan_id=self.data_scan_id,
)
state = job.state
if state in (DataScanJob.State.FAILED, DataScanJob.State.SUCCEEDED, DataScanJob.State.CANCELLED):
break
self.log.info(
"Current state is: %s, sleeping for %s seconds.",
DataScanJob.State(state).name,
self.polling_interval_seconds,
)
await asyncio.sleep(self.polling_interval_seconds)
yield TriggerEvent(
{
"job_id": self.job_id,
"job_state": DataScanJob.State(state).name,
"job": self._convert_to_dict(job),
}
)
def _convert_to_dict(self, job: DataScanJob) -> dict:
"""Return a representation of a DataScanJob instance as a dict."""
return DataScanJob.to_dict(job)
| DataplexDataQualityJobTrigger |
python | kamyu104__LeetCode-Solutions | Python/add-one-row-to-tree.py | {
"start": 29,
"end": 154
} | class ____(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
| TreeNode |
python | python-openxml__python-docx | src/docx/table.py | {
"start": 11188,
"end": 12176
} | class ____(Parented):
"""Table column."""
def __init__(self, gridCol: CT_TblGridCol, parent: TableParent):
super(_Column, self).__init__(parent)
self._parent = parent
self._gridCol = gridCol
@property
def cells(self) -> tuple[_Cell, ...]:
"""Sequence of |_Cell| instances corresponding to cells in this column."""
return tuple(self.table.column_cells(self._index))
@property
def table(self) -> Table:
"""Reference to the |Table| object this column belongs to."""
return self._parent.table
@property
def width(self) -> Length | None:
"""The width of this column in EMU, or |None| if no explicit width is set."""
return self._gridCol.w
@width.setter
def width(self, value: Length | None):
self._gridCol.w = value
@property
def _index(self):
"""Index of this column in its table, starting from zero."""
return self._gridCol.gridCol_idx
| _Column |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_no_member_7631.py | {
"start": 215,
"end": 250
} | class ____(Base):
attr: int
| Parent |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-rankllm-rerank/llama_index/postprocessor/rankllm_rerank/base.py | {
"start": 667,
"end": 6575
} | class ____(BaseNodePostprocessor):
"""
RankLLM reranking suite. This class allows access to several reranking models supported by RankLLM. To use a model offered by the RankLLM suite, pass the desired model's hugging face path, found at https://huggingface.co/castorini. e.g., to access LiT5-Distill-base, pass 'castorini/LiT5-Distill-base' as the model name (https://huggingface.co/castorini/LiT5-Distill-base).
Below are all the rerankers supported with the model name to be passed as an argument to the constructor. Some model have convenience names for ease of use:
Listwise:
- OSLLM (Open Source LLM). Takes in a valid Hugging Face model name. e.g., 'Qwen/Qwen2.5-7B-Instruct'
- RankZephyr. model='rank_zephyr' or 'castorini/rank_zephyr_7b_v1_full'
        - RankVicuna. model='rank_vicuna' or 'castorini/rank_vicuna_7b_v1'
- RankGPT. Takes in a valid gpt model. e.g., 'gpt-3.5-turbo', 'gpt-4','gpt-3'
- GenAI. Takes in a valid gemini model. e.g., 'gemini-2.0-flash'
Pairwise:
- DuoT5. model='duot5'
Pointwise:
- MonoT5. model='monot5'
"""
model: str = Field(description="Model name.", default="rank_zephyr")
top_n: Optional[int] = Field(
description="Number of nodes to return sorted by reranking score."
)
window_size: int = Field(
description="Reranking window size. Applicable only for listwise and pairwise models.",
default=20,
)
batch_size: Optional[int] = Field(
description="Reranking batch size. Applicable only for pointwise models."
)
context_size: int = Field(
description="Maximum number of tokens for the context window.", default=4096
)
prompt_mode: PromptMode = Field(
description="Prompt format and strategy used when invoking the reranking model.",
default=PromptMode.RANK_GPT,
)
num_gpus: int = Field(
description="Number of GPUs to use for inference if applicable.", default=1
)
num_few_shot_examples: int = Field(
description="Number of few-shot examples to include in the prompt.", default=0
)
few_shot_file: Optional[str] = Field(
description="Path to a file containing few-shot examples, used if few-shot prompting is enabled.",
default=None,
)
use_logits: bool = Field(
description="Whether to use raw logits for reranking scores instead of probabilities.",
default=False,
)
use_alpha: bool = Field(
description="Whether to apply an alpha scaling factor in the reranking score calculation.",
default=False,
)
variable_passages: bool = Field(
description="Whether to allow passages of variable lengths instead of fixed-size chunks.",
default=False,
)
stride: int = Field(
description="Stride to use when sliding over long documents for reranking.",
default=10,
)
use_azure_openai: bool = Field(
description="Whether to use Azure OpenAI instead of the standard OpenAI API.",
default=False,
)
_reranker: Any = PrivateAttr()
@classmethod
def class_name(cls) -> str:
return "RankLLMRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
kwargs = {
"model_path": self.model,
"default_model_coordinator": None,
"context_size": self.context_size,
"prompt_mode": self.prompt_mode,
"num_gpus": self.num_gpus,
"use_logits": self.use_logits,
"use_alpha": self.use_alpha,
"num_few_shot_examples": self.num_few_shot_examples,
"few_shot_file": self.few_shot_file,
"variable_passages": self.variable_passages,
"interactive": False,
"window_size": self.window_size,
"stride": self.stride,
"use_azure_openai": self.use_azure_openai,
}
model_coordinator = Reranker.create_model_coordinator(**kwargs)
self._reranker = Reranker(model_coordinator)
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
docs = [
(node.get_content(metadata_mode=MetadataMode.EMBED), node.get_score())
for node in nodes
]
request = Request(
query=Query(
text=query_bundle.query_str,
qid=1,
),
candidates=[
Candidate(
docid=index,
score=doc[1],
doc={
"body": doc[0],
"headings": "",
"title": "",
"url": "",
},
)
for index, doc in enumerate(docs)
],
)
# scores are maintained the same as generated from the retriever
permutation = self._reranker.rerank(
request,
rank_end=len(request.candidates),
rank_start=0,
shuffle_candidates=False,
logging=False,
top_k_retrieve=len(request.candidates),
)
new_nodes: List[NodeWithScore] = []
for candidate in permutation.candidates:
id: int = int(candidate.docid)
new_nodes.append(NodeWithScore(node=nodes[id].node, score=nodes[id].score))
if self.top_n is None:
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
else:
dispatcher.event(ReRankEndEvent(nodes=new_nodes[: self.top_n]))
return new_nodes[: self.top_n]
| RankLLMRerank |
python | pytorch__pytorch | test/distributed/test_store.py | {
"start": 31104,
"end": 33871
} | class ____(TestCase):
def test_optional_methods_fail(self):
class TestStore(dist.Store):
pass
store = TestStore()
self.assertFalse(store.has_extended_api())
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
store.append("foo", "bar")
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
store.multi_get(["foo", "bar"])
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
store.multi_set(["foo", "bar"], [b"v", b"v"])
def test_has_extended_api_passthrough(self):
class TestStore(dist.Store):
pass
test_store = TestStore()
store = dist.PrefixStore("p", test_store)
self.assertFalse(store.has_extended_api())
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
store.append("foo", "bar")
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
store.multi_get(["foo", "bar"])
with self.assertRaisesRegex(RuntimeError, "Not implemented."):
store.multi_set(["foo", "bar"], [b"v", b"v"])
def test_has_extended_api_roundtrip(self):
store = DummyStore()
prefix = dist.PrefixStore("p", store)
self.assertTrue(prefix.has_extended_api())
def test_append_roundtrip(self):
store = DummyStore()
prefix = dist.PrefixStore("p", store)
prefix.append("foo", "bar")
self.assertEqual(1, len(store.appends))
self.assertEqual(("p/foo", b"bar"), store.appends[0])
def test_multi_get_roundtrip(self):
store = DummyStore()
prefix = dist.PrefixStore("p", store)
store.multi_get_res.append([b"x", b"y"])
res = prefix.multi_get(["foo", "bar"])
self.assertEqual(1, len(store.multi_gets))
self.assertEqual(["p/foo", "p/bar"], store.multi_gets[0])
self.assertEqual([b"x", b"y"], res)
def test_multi_set_roundtrip(self):
store = DummyStore()
prefix = dist.PrefixStore("p", store)
prefix.multi_set(["foo", "bar"], [b"x", b"y"])
self.assertEqual(1, len(store.multi_sets))
self.assertEqual(["p/foo", "p/bar"], store.multi_sets[0][0])
self.assertEqual([b"x", b"y"], store.multi_sets[0][1])
def test_extended_methods_fallbacks(self):
test_store = MyPythonStore()
store = dist.PrefixStore("p", test_store)
self.assertFalse(store.has_extended_api())
store.append("foo", b"po")
store.append("foo", b"tato")
self.assertEqual(store.get("foo"), b"potato")
store.multi_set(["a", "b"], [b"c", b"d"])
self.assertEqual(store.multi_get(["a", "b", "foo"]), [b"c", b"d", b"potato"])
| TestPythonStore |
python | ray-project__ray | python/ray/data/tests/test_delta_sharing.py | {
"start": 3747,
"end": 5475
} | class ____(unittest.TestCase):
def test_valid_url(self):
url = "profile#share.schema.table"
expected_result = ("profile", "share", "schema", "table")
self.assertEqual(_parse_delta_sharing_url(url), expected_result)
def test_missing_hash(self):
url = "profile-share.schema.table"
with self.assertRaises(ValueError) as context:
_parse_delta_sharing_url(url)
self.assertEqual(str(context.exception), f"Invalid 'url': {url}")
def test_missing_fragments(self):
url = "profile#share.schema"
with self.assertRaises(ValueError) as context:
_parse_delta_sharing_url(url)
self.assertEqual(str(context.exception), f"Invalid 'url': {url}")
def test_empty_profile(self):
url = "#share.schema.table"
with self.assertRaises(ValueError) as context:
_parse_delta_sharing_url(url)
self.assertEqual(str(context.exception), f"Invalid 'url': {url}")
def test_empty_share(self):
url = "profile#.schema.table"
with self.assertRaises(ValueError) as context:
_parse_delta_sharing_url(url)
self.assertEqual(str(context.exception), f"Invalid 'url': {url}")
def test_empty_schema(self):
url = "profile#share..table"
with self.assertRaises(ValueError) as context:
_parse_delta_sharing_url(url)
self.assertEqual(str(context.exception), f"Invalid 'url': {url}")
def test_empty_table(self):
url = "profile#share.schema."
with self.assertRaises(ValueError) as context:
_parse_delta_sharing_url(url)
self.assertEqual(str(context.exception), f"Invalid 'url': {url}")
| TestParseDeltaSharingUrl |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/integration_tests/lib/duckdb_component/step_one.py | {
"start": 262,
"end": 1410
} | class ____(dg.Component):
"""A component that allows you to write SQL without learning dbt or Dagster's concepts."""
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
name = "op_name"
asset_specs = [dg.AssetSpec(key="the_key")]
path = (context.path / Path("raw_customers.csv")).absolute()
assert path.exists(), f"Path {path} does not exist."
@dg.multi_asset(name=name, specs=asset_specs)
def _asset(context: dg.AssetExecutionContext):
return self.execute(context, str(path))
return dg.Definitions(assets=[_asset])
def execute(self, context: dg.AssetExecutionContext, csv_path: str):
# Connect to DuckDB
con = duckdb.connect()
query = f"SELECT * FROM '{csv_path}'"
# Read CSV from parent directory
df = con.execute(query).fetchdf()
md = df.head().to_markdown(index=False)
print(md) # noqa
return dg.MaterializeResult(
metadata={
"query": dg.MetadataValue.md(query),
"df": dg.MetadataValue.md(md),
},
)
| DuckDbComponent |
python | rapidsai__cudf | python/cudf/cudf/pandas/fast_slow_proxy.py | {
"start": 2548,
"end": 3350
} | class ____:
"""
A totally unusable type. When a "fast" object is not available,
it's useful to set it to _Unusable() so that any operations
on it fail, and ensure fallback to the corresponding
"slow" object.
"""
def __call__(self, *args: Any, **kwds: Any) -> Any:
raise NotImplementedError(
"Fast implementation not available. "
"Falling back to the slow implementation"
)
def __getattribute__(self, name: str) -> Any:
if name in {"__class__"}: # needed for type introspection
return super().__getattribute__(name)
raise TypeError("Unusable type. Falling back to the slow object")
def __repr__(self) -> str:
raise AttributeError("Unusable type. Falling back to the slow object")
| _Unusable |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/sqs.py | {
"start": 1205,
"end": 4764
} | class ____(AwsBaseOperator[SqsHook]):
"""
Publish a message to an Amazon SQS queue.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SqsPublishOperator`
:param sqs_queue: The SQS queue url (templated)
:param message_content: The message content (templated)
:param message_attributes: additional attributes for the message (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:param delay_seconds: message delay (templated) (default: 1 second)
:param message_group_id: This parameter applies only to FIFO (first-in-first-out) queues. (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:param message_deduplication_id: This applies only to FIFO (first-in-first-out) queues.
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = SqsHook
template_fields: Sequence[str] = aws_template_fields(
"sqs_queue",
"message_content",
"delay_seconds",
"message_attributes",
"message_group_id",
"message_deduplication_id",
)
template_fields_renderers = {"message_attributes": "json"}
ui_color = "#6ad3fa"
def __init__(
self,
*,
sqs_queue: str,
message_content: str,
message_attributes: dict | None = None,
delay_seconds: int = 0,
message_group_id: str | None = None,
message_deduplication_id: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.sqs_queue = sqs_queue
self.message_content = message_content
self.delay_seconds = delay_seconds
self.message_attributes = message_attributes or {}
self.message_group_id = message_group_id
self.message_deduplication_id = message_deduplication_id
def execute(self, context: Context) -> dict:
"""
Publish the message to the Amazon SQS queue.
:param context: the context object
:return: dict with information about the message sent
For details of the returned dict see :py:meth:`botocore.client.SQS.send_message`
"""
result = self.hook.send_message(
queue_url=self.sqs_queue,
message_body=self.message_content,
delay_seconds=self.delay_seconds,
message_attributes=self.message_attributes,
message_group_id=self.message_group_id,
message_deduplication_id=self.message_deduplication_id,
)
self.log.info("send_message result: %s", result)
return result
| SqsPublishOperator |
python | great-expectations__great_expectations | great_expectations/data_context/types/base.py | {
"start": 5855,
"end": 7372
} | class ____(DictDot):
def __init__( # noqa: PLR0913 # FIXME CoP
self,
name,
class_name=None,
module_name=None,
orderby="asc",
reference_list=None,
order_keys_by=None,
key_reference_list=None,
datetime_format=None,
**kwargs,
) -> None:
self._name = name
self._class_name = class_name
self._module_name = module_name
self._orderby = orderby
for k, v in kwargs.items():
setattr(self, k, v)
if reference_list is not None:
self._reference_list = reference_list
if order_keys_by is not None:
self._order_keys_by = order_keys_by
if key_reference_list is not None:
self._key_reference_list = key_reference_list
if datetime_format is not None:
self._datetime_format = datetime_format
@property
def name(self):
return self._name
@property
def module_name(self):
return self._module_name
@property
def class_name(self):
return self._class_name
@property
def orderby(self):
return self._orderby
@property
def reference_list(self):
return self._reference_list
@property
def order_keys_by(self):
return self._order_keys_by
@property
def key_reference_list(self):
return self._key_reference_list
@property
def datetime_format(self):
return self._datetime_format
| SorterConfig |
python | getsentry__sentry | src/sentry/notifications/notifications/strategies/role_based_recipient_strategy.py | {
"start": 547,
"end": 3415
} | class ____(metaclass=ABCMeta):
member_by_user_id: MutableMapping[int, OrganizationMember] = {}
role: OrganizationRole | None = None
scope: str | None = None
def __init__(self, organization: Organization):
self.organization = organization
def get_member(self, user: RpcUser | Actor) -> OrganizationMember:
# cache the result
actor = Actor.from_object(user)
if actor.actor_type != ActorType.USER:
raise OrganizationMember.DoesNotExist()
user_id = actor.id
if user_id not in self.member_by_user_id:
self.member_by_user_id[user_id] = OrganizationMember.objects.get(
user_id=user_id, organization=self.organization
)
return self.member_by_user_id[user_id]
def set_member_in_cache(self, member: OrganizationMember) -> None:
"""
A way to set a member in a cache to avoid a query.
"""
if member.user_id is not None:
self.member_by_user_id[member.user_id] = member
def determine_recipients(
self,
) -> list[RpcUser]:
members = self.determine_member_recipients()
# store the members in our cache
for member in members:
self.set_member_in_cache(member)
# convert members to users
return user_service.get_many_by_id(
ids=[member.user_id for member in members if member.user_id]
)
def determine_member_recipients(self) -> QuerySet[OrganizationMember]:
"""
Depending on the type of request this might be all organization owners,
a specific person, or something in between.
"""
# default strategy is OrgMembersRecipientStrategy
members = OrganizationMember.objects.get_contactable_members_for_org(self.organization.id)
if not self.scope and not self.role:
return members
# you can either set the scope or the role for now
# if both are set we use the scope
valid_roles = []
if self.role and not self.scope:
valid_roles = [self.role.id]
elif self.scope:
valid_roles = [r.id for r in roles.get_all() if r.has_scope(self.scope)]
members = members.filter(role__in=valid_roles)
return members
def build_notification_footer_from_settings_url(self, settings_url: str) -> str:
if self.scope and not self.role:
return (
"You are receiving this notification because you have the scope "
f"{self.scope} | {settings_url}"
)
role_name = "Member"
if self.role:
role_name = self.role.name
return (
"You are receiving this notification because you're listed as an organization "
f"{role_name} | {settings_url}"
)
| RoleBasedRecipientStrategy |
python | walkccc__LeetCode | solutions/1976. Number of Ways to Arrive at Destination/1976.py | {
"start": 0,
"end": 874
} | class ____:
def countPaths(self, n: int, roads: list[list[int]]) -> int:
graph = [[] for _ in range(n)]
for u, v, w in roads:
graph[u].append((v, w))
graph[v].append((u, w))
return self._dijkstra(graph, 0, n - 1)
def _dijkstra(
self,
graph: list[list[tuple[int, int]]],
src: int,
dst: int,
) -> int:
MOD = 10**9 + 7
ways = [0] * len(graph)
dist = [math.inf] * len(graph)
ways[src] = 1
dist[src] = 0
minHeap = [(dist[src], src)]
while minHeap:
d, u = heapq.heappop(minHeap)
if d > dist[u]:
continue
for v, w in graph[u]:
if d + w < dist[v]:
dist[v] = d + w
ways[v] = ways[u]
heapq.heappush(minHeap, (dist[v], v))
elif d + w == dist[v]:
ways[v] += ways[u]
ways[v] %= MOD
return ways[dst]
| Solution |
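The class body above relies on module-level imports from the original file; a short usage sketch (the sample roads are invented here), assuming the masked class is restored to its target name `Solution`:

import heapq
import math

roads = [[0, 1, 1], [1, 2, 1], [0, 2, 2]]   # two shortest 0 -> 2 paths of length 2
print(Solution().countPaths(3, roads))       # -> 2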
python | ray-project__ray | rllib/core/models/specs/specs_base.py | {
"start": 178,
"end": 330
} | class ____:
pass
@Deprecated(
help="The Spec checking APIs have been deprecated and cancelled without "
"replacement.",
error=True,
)
| Spec |
python | pallets__jinja | src/jinja2/sandbox.py | {
"start": 4898,
"end": 13757
} | class ____(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
    If the template tries to access insecure code, a :exc:`SecurityError` is
    raised. However, other exceptions may also occur during rendering, so the
    caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table: dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"**": operator.pow,
"%": operator.mod,
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table: dict[str, t.Callable[[t.Any], t.Any]] = {
"+": operator.pos,
"-": operator.neg,
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
    #: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops: frozenset[str] = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
    #: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops: frozenset[str] = frozenset()
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
super().__init__(*args, **kwargs)
self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith("_") or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj: t.Any) -> bool:
"""Check if an object is safely callable. By default callables
are considered safe unless decorated with :func:`unsafe`.
This also recognizes the Django convention of setting
``func.alters_data = True``.
"""
return not (
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
)
def call_binop(
self, context: Context, operator: str, left: t.Any, right: t.Any
) -> t.Any:
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj: t.Any, argument: str | t.Any) -> t.Any | Undefined:
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, str):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
fmt = self.wrap_str_format(value)
if fmt is not None:
return fmt
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj: t.Any, attribute: str) -> t.Any | Undefined:
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
fmt = self.wrap_str_format(value)
if fmt is not None:
return fmt
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
"""Return an undefined object for unsafe attributes."""
return self.undefined(
f"access to attribute {attribute!r} of"
f" {type(obj).__name__!r} object is unsafe.",
name=attribute,
obj=obj,
exc=SecurityError,
)
def wrap_str_format(self, value: t.Any) -> t.Callable[..., str] | None:
"""If the given value is a ``str.format`` or ``str.format_map`` method,
return a new function than handles sandboxing. This is done at access
rather than in :meth:`call`, so that calls made without ``call`` are
also sandboxed.
"""
if not isinstance(
value, (types.MethodType, types.BuiltinMethodType)
) or value.__name__ not in ("format", "format_map"):
return None
f_self: t.Any = value.__self__
if not isinstance(f_self, str):
return None
str_type: type[str] = type(f_self)
is_format_map = value.__name__ == "format_map"
formatter: SandboxedFormatter
if isinstance(f_self, Markup):
formatter = SandboxedEscapeFormatter(self, escape=f_self.escape)
else:
formatter = SandboxedFormatter(self)
vformat = formatter.vformat
def wrapper(*args: t.Any, **kwargs: t.Any) -> str:
if is_format_map:
if kwargs:
raise TypeError("format_map() takes no keyword arguments")
if len(args) != 1:
raise TypeError(
f"format_map() takes exactly one argument ({len(args)} given)"
)
kwargs = args[0]
args = ()
return str_type(vformat(f_self, args, kwargs))
return update_wrapper(wrapper, value)
def call(
__self, # noqa: B902
__context: Context,
__obj: t.Any,
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError(f"{__obj!r} is not safely callable")
return __context.call(__obj, *args, **kwargs)
| SandboxedEnvironment |
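A hedged sketch of the operator-interception hook described in the docstrings above; the subclass name and the power-limit rule are illustrative only:

from jinja2.sandbox import SandboxedEnvironment

class PowerLimitedEnvironment(SandboxedEnvironment):
    # Delegate ``**`` to call_binop so oversized exponents can be rejected.
    intercepted_binops = frozenset(["**"])

    def call_binop(self, context, operator, left, right):
        if operator == "**" and right > 100:
            raise OverflowError("exponent too large for sandboxed template")
        return super().call_binop(context, operator, left, right)

env = PowerLimitedEnvironment()
print(env.from_string("{{ 2 ** 8 }}").render())  # "256"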
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py | {
"start": 2694,
"end": 3768
} | class ____(Benchmark):
r"""
Zettl objective function.
This class defines the Zettl [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Zettl}}(x) = \frac{1}{4} x_{1} + \left(x_{1}^{2} - 2 x_{1}
+ x_{2}^{2}\right)^{2}
with :math:`x_i \in [-1, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.0037912` for :math:`x = [-0.029896, 0.0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-0.02989597760285287, 0.0]]
self.fglob = -0.003791237220468656
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]
| Zettl |
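A quick numerical check, in plain Python and independent of the Benchmark base class, that the stated optimum matches the objective defined in `fun` above:

x = [-0.02989597760285287, 0.0]
f = (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]
print(f)  # ~ -0.003791237220468656, matching self.fglob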
python | anthropics__anthropic-sdk-python | examples/structured_outputs_streaming.py | {
"start": 196,
"end": 757
} | class ____(pydantic.BaseModel):
product_name: str
price: float
quantity: int
client = anthropic.Anthropic()
prompt = """
Extract the product name, price, and quantity from this customer message:
"Hi, I’d like to order 2 packs of Green Tea for 5.50 dollars each."
"""
with client.beta.messages.stream(
model="claude-sonnet-4-5",
messages=[{"role": "user", "content": prompt}],
max_tokens=1024,
output_format=Order,
) as stream:
for event in stream:
if event.type == "text":
print(event.parsed_snapshot())
| Order |
python | kamyu104__LeetCode-Solutions | Python/remove-palindromic-subsequences.py | {
"start": 29,
"end": 376
} | class ____(object):
def removePalindromeSub(self, s):
"""
:type s: str
:rtype: int
"""
def is_palindrome(s):
for i in xrange(len(s)//2):
if s[i] != s[-1-i]:
return False
return True
return 2 - is_palindrome(s) - (s == "")
| Solution |
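The snippet above uses Python 2's `xrange`; a Python 3 restatement of the same logic (not part of the original repo), which works because the LeetCode problem guarantees `s` contains only 'a' and 'b', so at most two removals are ever needed:

def remove_palindrome_sub(s: str) -> int:
    if not s:
        return 0
    return 1 if s == s[::-1] else 2

print(remove_palindrome_sub("ababa"))  # 1: already a palindrome
print(remove_palindrome_sub("abb"))    # 2: remove all 'a's, then all 'b's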
python | scipy__scipy | scipy/stats/tests/test_morestats.py | {
"start": 105931,
"end": 112019
} | class ____:
def setup_method(self):
self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
@pytest.mark.parametrize("method", ["mle", "pearsonr", "all"])
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, method, bounds):
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
maxlog = stats.boxcox_normmax(self.x, method=method,
optimizer=optimizer)
assert np.all(bounds[0] < maxlog)
assert np.all(maxlog < bounds[1])
@pytest.mark.slow
def test_user_defined_optimizer(self):
# tests an optimizer that is not based on scipy.optimize.minimize
lmbda = stats.boxcox_normmax(self.x)
lmbda_rounded = np.round(lmbda, 5)
lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001)
class MyResult:
pass
def optimizer(fun):
# brute force minimum over the range
objs = []
for lmbda in lmbda_range:
objs.append(fun(lmbda))
res = MyResult()
res.x = lmbda_range[np.argmin(objs)]
return res
lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer)
assert lmbda2 != lmbda # not identical
assert_allclose(lmbda2, lmbda, 1e-5) # but as close as it should be
def test_user_defined_optimizer_and_brack_raises_error(self):
optimizer = optimize.minimize_scalar
# Using default `brack=None` with user-defined `optimizer` works as
# expected.
stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer)
# Using user-defined `brack` with user-defined `optimizer` is expected
# to throw an error. Instead, users should specify
# optimizer-specific parameters in the optimizer function itself.
with pytest.raises(ValueError, match="`brack` must be None if "
"`optimizer` is given"):
stats.boxcox_normmax(self.x, brack=(-2.0, 2.0),
optimizer=optimizer)
@pytest.mark.parametrize(
'x', ([2003.0, 1950.0, 1997.0, 2000.0, 2009.0],
[0.50000471, 0.50004979, 0.50005902, 0.50009312, 0.50001632]))
def test_overflow(self, x):
message = "The optimal lambda is..."
with pytest.warns(UserWarning, match=message):
lmbda = stats.boxcox_normmax(x, method='mle')
assert np.isfinite(special.boxcox(x, lmbda)).all()
# 10000 is safety factor used in boxcox_normmax
ymax = np.finfo(np.float64).max / 10000
x_treme = np.max(x) if lmbda > 0 else np.min(x)
y_extreme = special.boxcox(x_treme, lmbda)
assert_allclose(y_extreme, ymax * np.sign(lmbda))
def test_negative_ymax(self):
with pytest.raises(ValueError, match="`ymax` must be strictly positive"):
stats.boxcox_normmax(self.x, ymax=-1)
@pytest.mark.parametrize("x", [
# positive overflow in float64
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0],
dtype=np.float64),
# negative overflow in float64
np.array([0.50000471, 0.50004979, 0.50005902, 0.50009312, 0.50001632],
dtype=np.float64),
# positive overflow in float32
np.array([200.3, 195.0, 199.7, 200.0, 200.9],
dtype=np.float32),
# negative overflow in float32
np.array([2e-30, 1e-30, 1e-30, 1e-30, 1e-30, 1e-30],
dtype=np.float32),
])
@pytest.mark.parametrize("ymax", [1e10, 1e30, None])
# TODO: add method "pearsonr" after fix overflow issue
@pytest.mark.parametrize("method", ["mle"])
def test_user_defined_ymax_input_float64_32(self, x, ymax, method):
# Test the maximum of the transformed data close to ymax
with pytest.warns(UserWarning, match="The optimal lambda is"):
kwarg = {'ymax': ymax} if ymax is not None else {}
lmb = stats.boxcox_normmax(x, method=method, **kwarg)
x_treme = [np.min(x), np.max(x)]
ymax_res = max(abs(stats.boxcox(x_treme, lmb)))
if ymax is None:
# 10000 is safety factor used in boxcox_normmax
ymax = np.finfo(x.dtype).max / 10000
assert_allclose(ymax, ymax_res, rtol=1e-5)
@pytest.mark.parametrize("x", [
# positive overflow in float32 but not float64
[200.3, 195.0, 199.7, 200.0, 200.9],
# negative overflow in float32 but not float64
[2e-30, 1e-30, 1e-30, 1e-30, 1e-30, 1e-30],
])
# TODO: add method "pearsonr" after fix overflow issue
@pytest.mark.parametrize("method", ["mle"])
def test_user_defined_ymax_inf(self, x, method):
x_32 = np.asarray(x, dtype=np.float32)
x_64 = np.asarray(x, dtype=np.float64)
# assert overflow with float32 but not float64
with pytest.warns(UserWarning, match="The optimal lambda is"):
stats.boxcox_normmax(x_32, method=method)
stats.boxcox_normmax(x_64, method=method)
# compute the true optimal lambda then compare them
lmb_32 = stats.boxcox_normmax(x_32, ymax=np.inf, method=method)
lmb_64 = stats.boxcox_normmax(x_64, ymax=np.inf, method=method)
assert_allclose(lmb_32, lmb_64, rtol=1e-2)
| TestBoxcoxNormmax |
python | apache__airflow | dev/breeze/tests/test_ui_commands.py | {
"start": 6667,
"end": 6875
} | class ____:
def test_locale_files_creation(self):
lf = LocaleFiles(locale="en", files=["test.json", "common.json"])
assert lf.locale == "en"
assert len(lf.files) == 2
| TestLocaleFiles |
python | huggingface__transformers | tests/models/cohere2/test_modeling_cohere2.py | {
"start": 1838,
"end": 2918
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Cohere2Model, Cohere2ForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": Cohere2Model,
"text-generation": Cohere2ForCausalLM,
}
if is_torch_available()
else {}
)
_is_stateful = True
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
def setUp(self):
self.model_tester = Cohere2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=Cohere2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@slow
@require_read_token
@require_torch_large_accelerator
| Cohere2ModelTest |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/networks.py | {
"start": 18212,
"end": 20050
} | class ____(nn.Module, Critic):
def __init__(
self,
stream_names: List[str],
observation_specs: List[ObservationSpec],
network_settings: NetworkSettings,
encoded_act_size: int = 0,
outputs_per_stream: int = 1,
):
# This is not a typo, we want to call __init__ of nn.Module
nn.Module.__init__(self)
self.network_body = NetworkBody(
observation_specs, network_settings, encoded_act_size=encoded_act_size
)
if network_settings.memory is not None:
encoding_size = network_settings.memory.memory_size // 2
else:
encoding_size = network_settings.hidden_units
self.value_heads = ValueHeads(stream_names, encoding_size, outputs_per_stream)
def update_normalization(self, buffer: AgentBuffer) -> None:
self.network_body.update_normalization(buffer)
@property
def memory_size(self) -> int:
return self.network_body.memory_size
def critic_pass(
self,
inputs: List[torch.Tensor],
memories: Optional[torch.Tensor] = None,
sequence_length: int = 1,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
value_outputs, critic_mem_out = self.forward(
inputs, memories=memories, sequence_length=sequence_length
)
return value_outputs, critic_mem_out
def forward(
self,
inputs: List[torch.Tensor],
actions: Optional[torch.Tensor] = None,
memories: Optional[torch.Tensor] = None,
sequence_length: int = 1,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
encoding, memories = self.network_body(
inputs, actions, memories, sequence_length
)
output = self.value_heads(encoding)
return output, memories
| ValueNetwork |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 4400,
"end": 5067
} | class ____(DataModel):
"""
A data model for omitted arguments. Only the "argument" representation
is defined, other representations raise a NotImplementedError.
"""
# Omitted arguments are using a dummy value type
def get_value_type(self):
return ir.LiteralStructType([])
# Omitted arguments don't produce any LLVM function argument.
def get_argument_type(self):
return ()
def as_argument(self, builder, val):
return ()
def from_argument(self, builder, val):
assert val == (), val
return None
@register_default(types.Boolean)
@register_default(types.BooleanLiteral)
| OmittedArgDataModel |
python | huggingface__transformers | src/transformers/models/deberta/modeling_deberta.py | {
"start": 42107,
"end": 44436
} | class ____(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@auto_docstring
| DebertaForTokenClassification |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 17617,
"end": 18071
} | class ____(_Multi2VecBase):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.MULTI2MULTI_JINAAI, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
model: Optional[str]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
return ret_dict
| _Multi2MultiVecJinaConfig |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_pubsub.py | {
"start": 3843,
"end": 11017
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_execute(self, mock_hook):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
context = mock.MagicMock()
response = operator.execute(context=context)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=None,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert response == TEST_SUBSCRIPTION
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_execute_different_project_ids(self, mock_hook):
another_project = "another-project"
operator = PubSubCreateSubscriptionOperator(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=another_project,
task_id=TASK_ID,
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
context = mock.MagicMock()
response = operator.execute(context=context)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=another_project,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert response == TEST_SUBSCRIPTION
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_execute_no_subscription(self, mock_hook):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, topic=TEST_TOPIC
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
context = mock.MagicMock()
response = operator.execute(context=context)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=None,
subscription_project_id=None,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert response == TEST_SUBSCRIPTION
@pytest.mark.parametrize(
("project_id", "subscription", "subscription_project_id", "expected_input", "expected_output"),
[
(
TEST_PROJECT,
TEST_SUBSCRIPTION,
None,
f"topic:{TEST_PROJECT}:{TEST_TOPIC}",
f"subscription:{TEST_PROJECT}:{TEST_SUBSCRIPTION}",
),
(
TEST_PROJECT,
TEST_SUBSCRIPTION,
"another-project",
f"topic:{TEST_PROJECT}:{TEST_TOPIC}",
f"subscription:another-project:{TEST_SUBSCRIPTION}",
),
(
TEST_PROJECT,
None,
None,
f"topic:{TEST_PROJECT}:{TEST_TOPIC}",
f"subscription:{TEST_PROJECT}:generated",
),
(
TEST_PROJECT,
None,
"another-project",
f"topic:{TEST_PROJECT}:{TEST_TOPIC}",
"subscription:another-project:generated",
),
(
None,
None,
None,
f"topic:connection-project:{TEST_TOPIC}",
"subscription:connection-project:generated",
),
],
)
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_get_openlineage_facets(
self,
mock_hook,
project_id,
subscription,
subscription_project_id,
expected_input,
expected_output,
):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID,
project_id=project_id,
topic=TEST_TOPIC,
subscription=subscription,
subscription_project_id=subscription_project_id,
)
mock_hook.return_value.create_subscription.return_value = subscription or "generated"
mock_hook.return_value.project_id = project_id or "connection-project"
context = mock.MagicMock()
response = operator.execute(context=context)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=project_id,
topic=TEST_TOPIC,
subscription=subscription,
subscription_project_id=subscription_project_id,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
enable_message_ordering=False,
expiration_policy=None,
filter_=None,
dead_letter_policy=None,
retry_policy=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
if subscription:
assert response == TEST_SUBSCRIPTION
else:
assert response == "generated"
result = operator.get_openlineage_facets_on_complete(operator)
assert not result.run_facets
assert not result.job_facets
assert len(result.inputs) == 1
assert result.inputs[0].namespace == "pubsub"
assert result.inputs[0].name == expected_input
assert len(result.outputs) == 1
assert result.outputs[0].namespace == "pubsub"
assert result.outputs[0].name == expected_output
| TestPubSubSubscriptionCreateOperator |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_path_converters.py | {
"start": 4009,
"end": 5624
} | class ____:
def test_create_converter(self):
converter = simple_package_converter("mypackage")
root = Path("/project")
file_path = root / "submodule.py"
result = converter(file_path, root)
assert result == "mypackage.submodule"
def test_nested_modules(self):
converter = simple_package_converter("mypackage")
root = Path("/project")
file_path = root / "sub1" / "sub2" / "module.py"
result = converter(file_path, root)
assert result == "mypackage.sub1.sub2.module"
def test_init_module(self):
converter = simple_package_converter("mypackage")
root = Path("/project")
file_path = root / "subpackage" / "__init__.py"
result = converter(file_path, root)
assert result == "mypackage.subpackage"
def test_root_level_module(self):
converter = simple_package_converter("mypackage")
root = Path("/project")
file_path = root / "module.py"
result = converter(file_path, root)
assert result == "mypackage.module"
def test_root_level_init(self):
converter = simple_package_converter("mypackage")
root = Path("/project")
file_path = root / "__init__.py"
result = converter(file_path, root)
assert result == "mypackage"
def test_file_outside_root(self):
converter = simple_package_converter("mypackage")
root = Path("/project")
file_path = Path("/other") / "module.py"
result = converter(file_path, root)
assert result is None
| TestSimplePackageConverter |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py | {
"start": 14120,
"end": 16842
} | class ____:
def test_init(self):
"""
Test init by creating ASBCreateSubscriptionOperator with task id, subscription name, topic name and
asserting with value
"""
asb_create_subscription = AzureServiceBusSubscriptionCreateOperator(
task_id="asb_create_subscription",
topic_name=TOPIC_NAME,
subscription_name=SUBSCRIPTION_NAME,
)
assert asb_create_subscription.task_id == "asb_create_subscription"
assert asb_create_subscription.subscription_name == SUBSCRIPTION_NAME
assert asb_create_subscription.topic_name == TOPIC_NAME
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook.get_conn")
@mock.patch("azure.servicebus.management.SubscriptionProperties")
def test_create_subscription(self, mock_subscription_properties, mock_get_conn):
"""
Test AzureServiceBusSubscriptionCreateOperator passed with the subscription name, topic name
mocking the connection details, hook create_subscription function
"""
asb_create_subscription = AzureServiceBusSubscriptionCreateOperator(
task_id="create_service_bus_subscription",
topic_name=TOPIC_NAME,
subscription_name=SUBSCRIPTION_NAME,
)
mock_subscription_properties.name = SUBSCRIPTION_NAME
mock_subscription_properties.to = SUBSCRIPTION_NAME
mock_get_conn.return_value.__enter__.return_value.create_subscription.return_value = (
mock_subscription_properties
)
with mock.patch.object(asb_create_subscription.log, "info") as mock_log_info:
asb_create_subscription.execute(None)
mock_log_info.assert_called_with("Created subscription %s", SUBSCRIPTION_NAME)
@pytest.mark.parametrize(
("mock_subscription_name", "mock_topic_name"),
[("subscription_1", None), (None, "topic_1")],
)
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook")
def test_create_subscription_exception(
self, mock_sb_admin_client, mock_subscription_name, mock_topic_name
):
"""
        Test that `AzureServiceBusSubscriptionCreateOperator` raises an exception when the
        subscription name or topic name is None, asserting with pytest that a TypeError is raised
"""
asb_create_subscription = AzureServiceBusSubscriptionCreateOperator(
task_id="create_service_bus_subscription",
topic_name=mock_topic_name,
subscription_name=mock_subscription_name,
)
with pytest.raises(TypeError):
asb_create_subscription.execute(None)
| TestASBCreateSubscriptionOperator |
python | huggingface__transformers | tests/utils/test_hf_argparser.py | {
"start": 1734,
"end": 1795
} | class ____(Enum):
titi = "titi"
toto = "toto"
| BasicEnum |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 11355,
"end": 11793
} | class ____(PlotEvent):
''' Announce the end of "interactive level-of-detail" mode on a plot.
During interactive actions such as panning or zooming, Bokeh can
optionally, temporarily draw a reduced set of the data, in order to
maintain high interactive rates. This is referred to as interactive
Level-of-Detail (LOD) mode. This event fires whenever a LOD mode
has just ended.
'''
event_name = 'lodend'
| LODEnd |
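A hedged usage sketch of subscribing to this event via Bokeh's standard event API (the plot contents below are invented for illustration):

from bokeh.events import LODEnd
from bokeh.models import CustomJS
from bokeh.plotting import figure

p = figure()
p.scatter([1, 2, 3], [4, 5, 6])
# Log to the browser console whenever interactive level-of-detail mode ends.
p.js_on_event(LODEnd, CustomJS(code="console.log('LOD mode ended')"))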
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py | {
"start": 25652,
"end": 38069
} | class ____(
BaseWorker[
"KubernetesWorkerJobConfiguration",
"KubernetesWorkerVariables",
"KubernetesWorkerResult",
]
):
"""Prefect worker that executes flow runs within Kubernetes Jobs."""
type: str = "kubernetes"
job_configuration = KubernetesWorkerJobConfiguration
job_configuration_variables = KubernetesWorkerVariables
_description = (
"Execute flow runs within jobs scheduled on a Kubernetes cluster. Requires a "
"Kubernetes cluster."
)
_display_name = "Kubernetes"
_documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png" # noqa
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self._created_secrets: dict[
tuple[str, str], KubernetesWorkerJobConfiguration
] = {}
async def _initiate_run(
self,
flow_run: "FlowRun",
configuration: KubernetesWorkerJobConfiguration,
) -> None:
"""
Creates a Kubernetes job to start flow run execution. This method does not
wait for the job to complete.
Args:
flow_run: The flow run to execute
configuration: The configuration to use when executing the flow run
task_status: The task status object for the current flow run. If provided,
the task will be marked as started.
"""
logger = self.get_flow_run_logger(flow_run)
async with self._get_configured_kubernetes_client(configuration) as client:
logger.info("Creating Kubernetes job...")
await self._create_job(configuration, client)
async def run(
self,
flow_run: "FlowRun",
configuration: KubernetesWorkerJobConfiguration,
task_status: anyio.abc.TaskStatus[int] | None = None,
) -> KubernetesWorkerResult:
"""
Executes a flow run within a Kubernetes Job and waits for the flow run
to complete.
Args:
flow_run: The flow run to execute
configuration: The configuration to use when executing the flow run.
task_status: The task status object for the current flow run. If provided,
the task will be marked as started.
Returns:
KubernetesWorkerResult: A result object containing information about the
final state of the flow run
"""
logger = self.get_flow_run_logger(flow_run)
async with self._get_configured_kubernetes_client(configuration) as client:
logger.info("Creating Kubernetes job...")
job = await self._create_job(configuration, client)
assert job, "Job should be created"
pid = f"{job.metadata.namespace}:{job.metadata.name}"
# Indicate that the job has started
if task_status is not None:
task_status.started(pid)
return KubernetesWorkerResult(identifier=pid, status_code=0)
async def teardown(self, *exc_info: Any):
await super().teardown(*exc_info)
await self._clean_up_created_secrets()
async def _clean_up_created_secrets(self):
"""Deletes any secrets created during the worker's operation."""
for key, configuration in self._created_secrets.items():
async with self._get_configured_kubernetes_client(configuration) as client:
v1 = CoreV1Api(client)
result = await v1.delete_namespaced_secret(
name=key[0],
namespace=key[1],
)
if isinstance(result, Exception):
self._logger.warning(
"Failed to delete created secret with exception: %s", result
)
@asynccontextmanager
async def _get_configured_kubernetes_client(
self, configuration: KubernetesWorkerJobConfiguration
) -> AsyncGenerator["ApiClient", None]:
"""
Returns a configured Kubernetes client.
"""
client = None
settings = KubernetesSettings()
if configuration.cluster_config:
config_dict = configuration.cluster_config.config
context = configuration.cluster_config.context_name
client = await config.new_client_from_config_dict(
config_dict=config_dict,
context=context,
)
else:
# Try to load in-cluster configuration
try:
config.load_incluster_config()
client = ApiClient()
except config.ConfigException:
# If in-cluster config fails, load the local kubeconfig
client = await config.new_client_from_config()
if settings.worker.add_tcp_keepalive:
client.rest_client.pool_manager._request_class = KeepAliveClientRequest
try:
yield client
finally:
await client.close()
async def _replace_api_key_with_secret(
self,
configuration: KubernetesWorkerJobConfiguration,
client: "ApiClient",
secret_name: str | None = None,
secret_key: str | None = None,
):
"""Replaces the PREFECT_API_KEY environment variable with a Kubernetes secret"""
api_key = configuration.get_environment_variable_value("PREFECT_API_KEY")
if api_key and not secret_name:
secret_name = f"prefect-{_slugify_name(self.name)}-api-key"
secret = await self._upsert_secret(
name=secret_name,
value=api_key,
namespace=configuration.namespace,
client=client,
)
# Store configuration so that we can delete the secret when the worker shuts
# down
self._created_secrets[(secret.metadata.name, secret.metadata.namespace)] = (
configuration
)
secret_key = "value"
if secret_name and secret_key:
await self._replace_env_variable_with_secret(
env_variable_name="PREFECT_API_KEY",
configuration=configuration,
secret_name=secret_name,
secret_key=secret_key,
)
async def _replace_env_variable_with_secret(
self,
env_variable_name: str,
configuration: KubernetesWorkerJobConfiguration,
secret_name: str,
secret_key: str,
):
"""Replaces the an environment variable with a Kubernetes secret"""
manifest_env: list[dict[str, Any]] = configuration.job_manifest["spec"][
"template"
]["spec"]["containers"][0].get("env")
if secret_name and secret_key:
if not secret_key:
secret_key = "value"
new_api_env_entry = {
"name": env_variable_name,
"valueFrom": {"secretKeyRef": {"name": secret_name, "key": secret_key}},
}
manifest_env = [
entry if entry.get("name") != env_variable_name else new_api_env_entry
for entry in manifest_env
]
configuration.job_manifest["spec"]["template"]["spec"]["containers"][0][
"env"
] = manifest_env
@retry(
stop=stop_after_attempt(MAX_ATTEMPTS),
wait=wait_fixed(RETRY_MIN_DELAY_SECONDS)
+ wait_random(
RETRY_MIN_DELAY_JITTER_SECONDS,
RETRY_MAX_DELAY_JITTER_SECONDS,
),
reraise=True,
)
async def _create_job(
self, configuration: KubernetesWorkerJobConfiguration, client: "ApiClient"
) -> "V1Job":
"""
Creates a Kubernetes job from a job manifest.
"""
settings = KubernetesSettings()
if settings.worker.api_key_secret_name:
await self._replace_api_key_with_secret(
configuration=configuration,
client=client,
secret_name=settings.worker.api_key_secret_name,
secret_key=settings.worker.api_key_secret_key,
)
elif settings.worker.create_secret_for_api_key:
await self._replace_api_key_with_secret(
configuration=configuration, client=client
)
if (
settings.worker.api_auth_string_secret_name
and settings.worker.api_auth_string_secret_key
):
await self._replace_env_variable_with_secret(
env_variable_name="PREFECT_API_AUTH_STRING",
configuration=configuration,
secret_name=settings.worker.api_auth_string_secret_name,
secret_key=settings.worker.api_auth_string_secret_key,
)
else:
if configuration.get_environment_variable_value("PREFECT_API_AUTH_STRING"):
self._logger.warning(
"PREFECT_API_AUTH_STRING is set, but no secret name or key is provided. "
"The API auth string will be stored in the Kubernetes job manifest."
"This is not recommended and may be removed in a future version. "
"Please store the API auth string in a Kubernetes secret and "
"provide the secret name and key with the `PREFECT_INTEGRATIONS_KUBERNETES_WORKER_API_AUTH_STRING_SECRET_NAME` "
"and `PREFECT_INTEGRATIONS_KUBERNETES_WORKER_API_AUTH_STRING_SECRET_KEY` environment variables."
)
try:
batch_client = BatchV1Api(client)
job = await batch_client.create_namespaced_job(
configuration.namespace,
configuration.job_manifest,
)
except kubernetes_asyncio.client.exceptions.ApiException as exc:
# Parse the reason and message from the response if feasible
message = ""
if exc.reason:
message += ": " + exc.reason
if exc.body and "message" in (body := json.loads(exc.body)):
message += ": " + body["message"]
raise InfrastructureError(
f"Unable to create Kubernetes job{message}"
) from exc
return job
async def _upsert_secret(
self, name: str, value: str, namespace: str, client: "ApiClient"
):
encoded_value = base64.b64encode(value.encode("utf-8")).decode("utf-8")
core_client = CoreV1Api(client)
try:
# Get the current version of the Secret and update it with the
# new value
current_secret = await core_client.read_namespaced_secret(
name=name, namespace=namespace
)
current_secret.data = {"value": encoded_value}
secret = await core_client.replace_namespaced_secret(
name=name, namespace=namespace, body=current_secret
)
except ApiException as exc:
if exc.status != 404:
raise
# Create the secret if it doesn't already exist
metadata = V1ObjectMeta(name=name, namespace=namespace)
secret = V1Secret(
api_version="v1",
kind="Secret",
metadata=metadata,
data={"value": encoded_value},
)
secret = await core_client.create_namespaced_secret(
namespace=namespace, body=secret
)
return secret
@asynccontextmanager
async def _get_batch_client(
self, client: "ApiClient"
) -> AsyncGenerator["BatchV1Api", None]:
"""
Context manager for retrieving a Kubernetes batch client.
"""
try:
yield BatchV1Api(api_client=client)
finally:
await client.close()
async def __aenter__(self):
if KubernetesSettings().observer.enabled:
start_observer()
return await super().__aenter__()
async def __aexit__(self, *exc_info: Any):
try:
await super().__aexit__(*exc_info)
finally:
# Need to run after the runs task group exits
if KubernetesSettings().observer.enabled:
stop_observer()
| KubernetesWorker |
python | huggingface__transformers | tests/models/auto/test_video_processing_auto.py | {
"start": 1212,
"end": 11158
} | class ____(unittest.TestCase):
def setUp(self):
transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def test_video_processor_from_model_shortcut(self):
config = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
self.assertIsInstance(config, LlavaOnevisionVideoProcessor)
def test_video_processor_from_local_directory_from_key(self):
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{
"video_processor_type": "LlavaOnevisionVideoProcessor",
"processor_class": "LlavaOnevisionProcessor",
},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "llava_onevision"}, open(config_tmpfile, "w"))
config = AutoVideoProcessor.from_pretrained(tmpdirname)
self.assertIsInstance(config, LlavaOnevisionVideoProcessor)
def test_video_processor_from_local_directory_from_preprocessor_key(self):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{
"video_processor_type": "LlavaOnevisionVideoProcessor",
"processor_class": "LlavaOnevisionProcessor",
},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "llava_onevision"}, open(config_tmpfile, "w"))
config = AutoVideoProcessor.from_pretrained(tmpdirname)
self.assertIsInstance(config, LlavaOnevisionVideoProcessor)
def test_video_processor_from_local_directory_from_config(self):
with tempfile.TemporaryDirectory() as tmpdirname:
model_config = LlavaOnevisionConfig()
# Create a dummy config file with image_processor_type
processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{
"video_processor_type": "LlavaOnevisionVideoProcessor",
"processor_class": "LlavaOnevisionProcessor",
},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "llava_onevision"}, open(config_tmpfile, "w"))
# remove video_processor_type to make sure config.json alone is enough to load image processor locally
config_dict = AutoVideoProcessor.from_pretrained(tmpdirname).to_dict()
config_dict.pop("video_processor_type")
config = LlavaOnevisionVideoProcessor(**config_dict)
# save in new folder
model_config.save_pretrained(tmpdirname)
config.save_pretrained(tmpdirname)
config = AutoVideoProcessor.from_pretrained(tmpdirname)
# make sure private variable is not incorrectly saved
dict_as_saved = json.loads(config.to_json_string())
self.assertTrue("_processor_class" not in dict_as_saved)
self.assertIsInstance(config, LlavaOnevisionVideoProcessor)
def test_video_processor_from_local_file(self):
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
json.dump(
{
"video_processor_type": "LlavaOnevisionVideoProcessor",
"processor_class": "LlavaOnevisionProcessor",
},
open(processor_tmpfile, "w"),
)
config = AutoVideoProcessor.from_pretrained(processor_tmpfile)
self.assertIsInstance(config, LlavaOnevisionVideoProcessor)
def test_repo_not_found(self):
with self.assertRaisesRegex(
EnvironmentError,
"llava-hf/llava-doesnt-exist is not a local folder and is not a valid model identifier",
):
_ = AutoVideoProcessor.from_pretrained("llava-hf/llava-doesnt-exist")
def test_revision_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
):
_ = AutoVideoProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def test_video_processor_not_found(self):
with self.assertRaisesRegex(
EnvironmentError,
"Can't load video processor for 'hf-internal-testing/config-no-model'.",
):
_ = AutoVideoProcessor.from_pretrained("hf-internal-testing/config-no-model")
def test_from_pretrained_dynamic_video_processor(self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
video_processor = AutoVideoProcessor.from_pretrained("hf-internal-testing/test_dynamic_video_processor")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
video_processor = AutoVideoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_video_processor", trust_remote_code=False
)
video_processor = AutoVideoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_video_processor", trust_remote_code=True
)
self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
# Test the dynamic module is loaded only once.
reloaded_video_processor = AutoVideoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_video_processor", trust_remote_code=True
)
self.assertIs(video_processor.__class__, reloaded_video_processor.__class__)
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
video_processor.save_pretrained(tmp_dir)
reloaded_video_processor = AutoVideoProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertEqual(reloaded_video_processor.__class__.__name__, "NewVideoProcessor")
def test_new_video_processor_registration(self):
try:
AutoConfig.register("custom", CustomConfig)
AutoVideoProcessor.register(CustomConfig, CustomVideoProcessor)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
AutoVideoProcessor.register(LlavaOnevisionConfig, LlavaOnevisionVideoProcessor)
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{
"video_processor_type": "LlavaOnevisionVideoProcessor",
"processor_class": "LlavaOnevisionProcessor",
},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "llava_onevision"}, open(config_tmpfile, "w"))
video_processor = CustomVideoProcessor.from_pretrained(tmpdirname)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
video_processor.save_pretrained(tmp_dir)
new_video_processor = AutoVideoProcessor.from_pretrained(tmp_dir)
self.assertIsInstance(new_video_processor, CustomVideoProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in VIDEO_PROCESSOR_MAPPING._extra_content:
del VIDEO_PROCESSOR_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_video_processor_conflict(self):
class NewVideoProcessor(LlavaOnevisionVideoProcessor):
is_local = True
try:
AutoConfig.register("custom", CustomConfig)
AutoVideoProcessor.register(CustomConfig, NewVideoProcessor)
# If remote code is not set, the default is to use local
video_processor = AutoVideoProcessor.from_pretrained("hf-internal-testing/test_dynamic_video_processor")
self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
self.assertTrue(video_processor.is_local)
# If remote code is disabled, we load the local one.
video_processor = AutoVideoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_video_processor", trust_remote_code=False
)
self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
self.assertTrue(video_processor.is_local)
# If remote is enabled, we load from the Hub
video_processor = AutoVideoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_video_processor", trust_remote_code=True
)
self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
self.assertTrue(not hasattr(video_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in VIDEO_PROCESSOR_MAPPING._extra_content:
del VIDEO_PROCESSOR_MAPPING._extra_content[CustomConfig]
| AutoVideoProcessorTest |
python | realpython__materials | python-311/programmers.py | {
"start": 242,
"end": 664
} | class ____:
name: str
life_span: tuple[int, int]
@classmethod
def from_dict(cls, info: Info) -> Self:
return cls(
name=f"{info['name']['first']} {info['name']['last']}",
life_span=(info["birth"]["year"], info["death"]["year"]),
)
def convert_pair(first: Info, second: Info) -> tuple[Person, Person]:
return Person.from_dict(first), Person.from_dict(second)
| Person |
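A usage sketch for the class above (presumably decorated with `@dataclass` in the original file, which also defines the nested `Info` mapping type; the sample data here is invented), assuming the masked class is restored to its target name `Person`:

info = {
    "name": {"first": "Ada", "last": "Lovelace"},
    "birth": {"year": 1815},
    "death": {"year": 1852},
}
print(Person.from_dict(info))  # Person(name='Ada Lovelace', life_span=(1815, 1852))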
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 1901,
"end": 6177
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra
| ElectraEmbeddings |
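Each record above pairs a source snippet whose class name has been blanked out as ____ with a target cell holding that name. The sketch below shows one way such a record could be turned into a fill-in-the-blank pair and checked by substituting the target back into the blank; the dict keys "source" and "target" are assumptions about how a row is exposed once loaded, not something defined in this excerpt.

# Minimal sketch, assuming a loaded record is a dict with "source" and "target"
# keys; the dataset itself only shows the flattened cells above.
def to_fill_in_pair(record: dict) -> tuple[str, str]:
    # The masked code is the prompt, the class name is the answer.
    return record["source"], record["target"]


def reconstruct(record: dict) -> str:
    # Substitute the target back into the first blank to recover the original header.
    return record["source"].replace("____", record["target"], 1)


example = {
    "source": "class ____(nn.Module):\n    ...",
    "target": "ElectraEmbeddings",
}
prompt, answer = to_fill_in_pair(example)
print(reconstruct(example).splitlines()[0])  # class ElectraEmbeddings(nn.Module):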
python | huggingface__transformers | src/transformers/models/vit_mae/modeling_vit_mae.py | {
"start": 5899,
"end": 11729
} | class ____(nn.Module):
"""
Construct the CLS token, position and patch embeddings.
"""
def __init__(self, config):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = ViTMAEPatchEmbeddings(config)
self.num_patches = self.patch_embeddings.num_patches
# fixed sin-cos embedding
self.position_embeddings = nn.Parameter(
torch.zeros(1, self.num_patches + 1, config.hidden_size), requires_grad=False
)
self.patch_size = config.patch_size
self.config = config
def initialize_weights(self):
if getattr(self.patch_embeddings.projection, "_is_hf_initialized", False):
return
# initialize (and freeze) position embeddings by sin-cos embedding
pos_embed = get_2d_sincos_pos_embed(
self.position_embeddings.shape[-1], int(self.patch_embeddings.num_patches**0.5), add_cls_token=True
)
init.copy_(self.position_embeddings, torch.from_numpy(pos_embed).float().unsqueeze(0))
# initialize patch_embeddings like nn.Linear (instead of nn.Conv2d)
w = self.patch_embeddings.projection.weight
init.xavier_uniform_(w.view([w.shape[0], -1]))
# timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
init.normal_(self.cls_token, std=self.config.initializer_range)
# Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def random_masking(self, sequence, noise=None):
"""
Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
noise.
Args:
sequence (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`)
noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*) which is
mainly used for testing purposes to control randomness and maintain the reproducibility
"""
batch_size, seq_length, dim = sequence.shape
len_keep = int(seq_length * (1 - self.config.mask_ratio))
if noise is None:
noise = torch.rand(batch_size, seq_length, device=sequence.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1).to(sequence.device) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1).to(sequence.device)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim))
# generate the binary mask: 0 is keep, 1 is remove
mask = torch.ones([batch_size, seq_length], device=sequence.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return sequence_unmasked, mask, ids_restore
def forward(self, pixel_values, noise=None, interpolate_pos_encoding: bool = False):
batch_size, num_channels, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if interpolate_pos_encoding:
position_embeddings = self.interpolate_pos_encoding(embeddings, height, width)
else:
position_embeddings = self.position_embeddings
# add position embeddings w/o cls token
embeddings = embeddings + position_embeddings[:, 1:, :]
# masking: length -> length * config.mask_ratio
embeddings, mask, ids_restore = self.random_masking(embeddings, noise)
# append cls token
cls_token = self.cls_token + position_embeddings[:, :1, :]
cls_tokens = cls_token.expand(embeddings.shape[0], -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
return embeddings, mask, ids_restore
| ViTMAEEmbeddings |
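The random_masking method in the record above keeps a random subset of patches per sample by argsorting uniform noise, gathering the first len_keep indices, and unshuffling a binary mask. The standalone snippet below replays that logic on a toy tensor; the batch size, sequence length, embedding dimension, and mask ratio are arbitrary values chosen for the example.

# Standalone illustration of the per-sample shuffling used in random_masking above;
# all shapes and the mask ratio are toy values, not taken from any config.
import torch

batch_size, seq_length, dim, mask_ratio = 2, 8, 4, 0.75
sequence = torch.randn(batch_size, seq_length, dim)

len_keep = int(seq_length * (1 - mask_ratio))
noise = torch.rand(batch_size, seq_length)        # noise in [0, 1]
ids_shuffle = torch.argsort(noise, dim=1)         # ascending: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1)

# keep the first subset of shuffled indices
ids_keep = ids_shuffle[:, :len_keep]
sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim))

# binary mask: 0 is keep, 1 is remove, then unshuffle it back to the original order
mask = torch.ones(batch_size, seq_length)
mask[:, :len_keep] = 0
mask = torch.gather(mask, dim=1, index=ids_restore)

print(sequence_unmasked.shape, mask.sum(dim=1))   # torch.Size([2, 2, 4]), 6 masked per sample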