language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydata__xarray | xarray/groupers.py | {
"start": 28357,
"end": 31006
} | class ____(Grouper):
"""Allows grouping using a custom definition of seasons.
Parameters
----------
seasons: sequence of str
List of strings representing seasons. E.g. ``"JF"`` or ``"JJA"`` etc.
Overlapping seasons are allowed (e.g. ``["DJFM", "MAMJ", "JJAS", "SOND"]``)
Examples
--------
>>> SeasonGrouper(["JF", "MAM", "JJAS", "OND"])
SeasonGrouper(seasons=['JF', 'MAM', 'JJAS', 'OND'])
The ordering is preserved
>>> SeasonGrouper(["MAM", "JJAS", "OND", "JF"])
SeasonGrouper(seasons=['MAM', 'JJAS', 'OND', 'JF'])
Overlapping seasons are allowed
>>> SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"])
SeasonGrouper(seasons=['DJFM', 'MAMJ', 'JJAS', 'SOND'])
"""
seasons: Sequence[str]
# drop_incomplete: bool = field(default=True) # TODO
def factorize(self, group: T_Group) -> EncodedGroups:
if TYPE_CHECKING:
assert not isinstance(group, _DummyGroup)
if not _contains_datetime_like_objects(group.variable):
raise ValueError(
"SeasonGrouper can only be used to group by datetime-like arrays."
)
months = group.dt.month.data
seasons_groups = find_independent_seasons(self.seasons)
codes_ = np.full((len(seasons_groups),) + group.shape, -1, dtype=np.int8)
group_indices: list[list[int]] = [[]] * len(self.seasons)
for axis_index, seasgroup in enumerate(seasons_groups):
for season_tuple, code in zip(
seasgroup.inds, seasgroup.codes, strict=False
):
mask = np.isin(months, season_tuple)
codes_[axis_index, mask] = code
(indices,) = mask.nonzero()
group_indices[code] = indices.tolist()
if np.all(codes_ == -1):
raise ValueError(
"Failed to group data. Are you grouping by a variable that is all NaN?"
)
needs_dummy_dim = len(seasons_groups) > 1
codes = DataArray(
dims=(("__season_dim__",) if needs_dummy_dim else tuple()) + group.dims,
data=codes_ if needs_dummy_dim else codes_.squeeze(),
attrs=group.attrs,
name="season",
)
unique_coord = Variable("season", self.seasons, attrs=group.attrs)
full_index = pd.Index(self.seasons)
return EncodedGroups(
codes=codes,
group_indices=tuple(group_indices),
unique_coord=unique_coord,
full_index=full_index,
)
def reset(self) -> Self:
return type(self)(self.seasons)
@dataclass
| SeasonGrouper |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py | {
"start": 13174,
"end": 14188
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
super().setUp()
self.handler = MSTeamsIssueAlertHandler()
self.detector = self.create_detector(project=self.project)
self.action = self.create_action(
type=Action.Type.MSTEAMS,
integration_id="1234567890",
config={
"target_identifier": "channel789",
"target_display": "General Channel",
"target_type": ActionTarget.SPECIFIC,
},
)
def test_build_rule_action_blob(self) -> None:
"""Test that build_rule_action_blob creates correct MSTeams action data"""
blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
assert blob == {
"id": "sentry.integrations.msteams.notify_action.MsTeamsNotifyServiceAction",
"team": "1234567890",
"channel_id": "channel789",
"channel": "General Channel",
}
| TestMSTeamsIssueAlertHandler |
python | optuna__optuna | optuna/testing/storages.py | {
"start": 1767,
"end": 7071
} | class ____(AbstractContextManager):
def __init__(self, storage_specifier: str, **kwargs: Any) -> None:
self.storage_specifier = storage_specifier
self.extra_args = kwargs
self.tempfile: IO[Any] | None = None
self.server: grpc.Server | None = None
self.thread: threading.Thread | None = None
self.proxy: GrpcStorageProxy | None = None
self.storage: BaseStorage | None = None
self.backend_storage: BaseStorage | None = None
def __enter__(
self,
) -> (
optuna.storages.InMemoryStorage
| optuna.storages._CachedStorage
| optuna.storages.RDBStorage
| optuna.storages.JournalStorage
| optuna.storages.GrpcStorageProxy
):
if self.storage_specifier == "inmemory":
if len(self.extra_args) > 0:
raise ValueError("InMemoryStorage does not accept any arguments!")
self.storage = optuna.storages.InMemoryStorage()
elif "sqlite" in self.storage_specifier:
self.tempfile = NamedTemporaryFilePool().tempfile()
url = "sqlite:///{}".format(self.tempfile.name)
rdb_storage = optuna.storages.RDBStorage(
url,
engine_kwargs={"connect_args": {"timeout": SQLITE3_TIMEOUT}},
**self.extra_args,
)
self.storage = (
optuna.storages._CachedStorage(rdb_storage)
if "cached" in self.storage_specifier
else rdb_storage
)
elif self.storage_specifier == "journal_redis":
journal_redis_storage = optuna.storages.journal.JournalRedisBackend(
"redis://localhost"
)
journal_redis_storage._redis = self.extra_args.get(
"redis", fakeredis.FakeStrictRedis()
)
self.storage = optuna.storages.JournalStorage(journal_redis_storage)
elif self.storage_specifier == "grpc_journal_file":
self.tempfile = self.extra_args.get("file", NamedTemporaryFilePool().tempfile())
assert self.tempfile is not None
storage = optuna.storages.JournalStorage(
optuna.storages.journal.JournalFileBackend(self.tempfile.name)
)
self.storage = self._create_proxy(
storage, thread_pool=self.extra_args.get("thread_pool")
)
elif "journal" in self.storage_specifier:
self.tempfile = self.extra_args.get("file", NamedTemporaryFilePool().tempfile())
assert self.tempfile is not None
file_storage = JournalFileBackend(self.tempfile.name)
self.storage = optuna.storages.JournalStorage(file_storage)
elif self.storage_specifier == "grpc_rdb":
self.tempfile = NamedTemporaryFilePool().tempfile()
url = "sqlite:///{}".format(self.tempfile.name)
self.backend_storage = optuna.storages.RDBStorage(url)
self.storage = self._create_proxy(self.backend_storage)
elif self.storage_specifier == "grpc_proxy":
assert "base_storage" in self.extra_args
self.storage = self._create_proxy(self.extra_args["base_storage"])
else:
assert False
return self.storage
def _create_proxy(
self, storage: BaseStorage, thread_pool: ThreadPoolExecutor | None = None
) -> GrpcStorageProxy:
with _lock_to_search_for_free_port():
port = _find_free_port()
self.server = optuna.storages._grpc.server.make_server(
storage, "localhost", port, thread_pool=thread_pool
)
self.thread = threading.Thread(target=self.server.start)
self.thread.start()
self.proxy = GrpcStorageProxy(host="localhost", port=port)
self.proxy.wait_server_ready(timeout=60)
return self.proxy
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
# Unit tests create many short-lived Engine objects, so the connections created by the
# engine should be explicitly closed.
if isinstance(self.storage, optuna.storages.RDBStorage):
self.storage.engine.dispose()
elif isinstance(self.storage, optuna.storages._CachedStorage):
self.storage._backend.engine.dispose()
elif self.storage_specifier == "grpc_rdb":
assert isinstance(self.backend_storage, optuna.storages.RDBStorage)
self.backend_storage.engine.dispose()
if self.tempfile:
self.tempfile.close()
if self.proxy:
self.proxy.close()
self.proxy = None
if self.server:
assert self.thread is not None
self.server.stop(5).wait()
self.thread.join()
self.server = None
self.thread = None
def _find_free_port() -> int:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for port in range(13000, 13100):
try:
sock.bind(("localhost", port))
return port
except OSError:
continue
assert False, "must not reach here"
| StorageSupplier |
python | kamyu104__LeetCode-Solutions | Python/critical-connections-in-a-network.py | {
"start": 128,
"end": 1209
} | class ____(object):
def criticalConnections(self, n, connections):
"""
:type n: int
:type connections: List[List[int]]
:rtype: List[List[int]]
"""
def dfs(edges, parent, u, idx, lowlinks, lookup, result):
if lookup[u]:
return
lookup[u] = True
curr_idx = lowlinks[u] = idx[0]
idx[0] += 1
for v in edges[u]:
if v == parent:
continue
dfs(edges, u, v, idx, lowlinks, lookup, result)
lowlinks[u] = min(lowlinks[u], lowlinks[v])
if lowlinks[v] > curr_idx:
# if any lowlink of neighbors is larger than curr_idx
result.append([u, v])
edges = [[] for _ in xrange(n)]
idx, lowlinks, lookup = [0], [0]*n, [False]*n
result = []
for u, v in connections:
edges[u].append(v)
edges[v].append(u)
dfs(edges, -1, 0, idx, lowlinks, lookup, result)
return result
| Solution |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 90838,
"end": 91142
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=False).to(dtype=torch.float)
def forward(self, x):
return self.relu(self.conv(x))
| SubModelWithoutFusion |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_setitem.py | {
"start": 44092,
"end": 44487
} | class ____(CoercionTest):
# previously test_setitem_series_int64 in tests.indexing.test_coercion
@pytest.fixture
def obj(self):
return Series([1, 2, 3, 4])
@pytest.mark.parametrize(
"val,exp_dtype,raises",
[
(1, np.float64, False),
(1.1, np.float64, False),
(1 + 1j, np.complex128, True),
(True, object, True),
],
)
| TestCoercionInt64 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 255,
"end": 383
} | class ____(graphene.Interface):
message = graphene.String(required=True)
class Meta:
name = "Error"
| GrapheneError |
python | pallets__werkzeug | src/werkzeug/routing/exceptions.py | {
"start": 1437,
"end": 1769
} | class ____(RoutingException): # noqa: B903
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values: t.Mapping[str, t.Any], endpoint: t.Any) -> None:
super().__init__()
self.matched_values = matched_values
self.endpoint = endpoint
| RequestAliasRedirect |
python | openai__openai-python | src/openai/resources/batches.py | {
"start": 19451,
"end": 20008
} | class ____:
def __init__(self, batches: AsyncBatches) -> None:
self._batches = batches
self.create = _legacy_response.async_to_raw_response_wrapper(
batches.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
batches.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
batches.list,
)
self.cancel = _legacy_response.async_to_raw_response_wrapper(
batches.cancel,
)
| AsyncBatchesWithRawResponse |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/utils/sql.py | {
"start": 1764,
"end": 9634
} | class ____:
"""Temporary object used to construct OpenLineage Dataset."""
table: str
schema: str | None
database: str | None
fields: list[schema_dataset.SchemaDatasetFacetFields]
def to_dataset(self, namespace: str, database: str | None = None, schema: str | None = None) -> Dataset:
# Prefix the table name with database and schema name using
# the format: {database_name}.{table_schema}.{table_name}.
name = ".".join(
part
for part in [self.database or database, self.schema or schema, self.table]
if part is not None
)
return Dataset(
namespace=namespace,
name=name,
facets={"schema": schema_dataset.SchemaDatasetFacet(fields=self.fields)} if self.fields else {},
)
def get_table_schemas(
hook: BaseHook,
namespace: str,
schema: str | None,
database: str | None,
in_query: str | None,
out_query: str | None,
) -> tuple[list[Dataset], list[Dataset]]:
"""
Query database for table schemas.
Uses provided hook. Responsibility to provide queries for this function is on particular extractors.
If query for input or output table isn't provided, the query is skipped.
"""
# Do not query if we did not get both queries
if not in_query and not out_query:
return [], []
log.debug("Starting to query database for table schemas")
with closing(hook.get_conn()) as conn, closing(conn.cursor()) as cursor:
if in_query:
cursor.execute(in_query)
in_datasets = [x.to_dataset(namespace, database, schema) for x in parse_query_result(cursor)]
else:
in_datasets = []
if out_query:
cursor.execute(out_query)
out_datasets = [x.to_dataset(namespace, database, schema) for x in parse_query_result(cursor)]
else:
out_datasets = []
log.debug("Got table schema query result from database.")
return in_datasets, out_datasets
def parse_query_result(cursor) -> list[TableSchema]:
"""
Fetch results from DB-API 2.0 cursor and creates list of table schemas.
For each row it creates :class:`TableSchema`.
"""
schemas: dict = {}
columns: dict = defaultdict(list)
for row in cursor.fetchall():
table_schema_name: str = row[ColumnIndex.SCHEMA]
table_name: str = row[ColumnIndex.TABLE_NAME]
table_column = schema_dataset.SchemaDatasetFacetFields(
name=row[ColumnIndex.COLUMN_NAME],
type=row[ColumnIndex.UDT_NAME],
description=None,
)
ordinal_position = row[ColumnIndex.ORDINAL_POSITION]
try:
table_database = row[ColumnIndex.DATABASE]
except IndexError:
table_database = None
# Attempt to get table schema
table_key = ".".join(filter(None, [table_database, table_schema_name, table_name]))
schemas[table_key] = TableSchema(
table=table_name, schema=table_schema_name, database=table_database, fields=[]
)
columns[table_key].append((ordinal_position, table_column))
for schema in schemas.values():
table_key = ".".join(filter(None, [schema.database, schema.schema, schema.table]))
schema.fields = [x for _, x in sorted(columns[table_key])]
return list(schemas.values())
def create_information_schema_query(
columns: list[str],
information_schema_table_name: str,
tables_hierarchy: TablesHierarchy,
uppercase_names: bool = False,
use_flat_cross_db_query: bool = False,
sqlalchemy_engine: Engine | None = None,
) -> str:
"""Create query for getting table schemas from information schema."""
metadata = MetaData()
select_statements = []
# Don't iterate over tables hierarchy, just pass it to query single information schema table
if use_flat_cross_db_query:
information_schema_table = Table(
information_schema_table_name,
metadata,
*[Column(column) for column in columns],
quote=False,
)
filter_clauses = create_filter_clauses(
tables_hierarchy,
information_schema_table,
uppercase_names=uppercase_names,
)
select_statements.append(information_schema_table.select().filter(filter_clauses))
else:
for db, schema_mapping in tables_hierarchy.items():
# Information schema table name is expected to be "< information_schema schema >.<view/table name>"
# usually "information_schema.columns". In order to use table identifier correct for various table
# we need to pass first part of dot-separated identifier as `schema` argument to `sqlalchemy.Table`.
if db:
# Use database as first part of table identifier.
schema = db
table_name = information_schema_table_name
else:
# When no database passed, use schema as first part of table identifier.
schema, table_name = information_schema_table_name.split(".")
information_schema_table = Table(
table_name,
metadata,
*[Column(column) for column in columns],
schema=schema,
quote=False,
)
filter_clauses = create_filter_clauses(
{None: schema_mapping},
information_schema_table,
uppercase_names=uppercase_names,
)
select_statements.append(information_schema_table.select().filter(filter_clauses))
return str(
union_all(*select_statements).compile(sqlalchemy_engine, compile_kwargs={"literal_binds": True})
)
def create_filter_clauses(
mapping: dict,
information_schema_table: Table,
uppercase_names: bool = False,
) -> ColumnElement[bool]:
"""
Create comprehensive filter clauses for all tables in one database.
:param mapping: a nested dictionary of database, schema names and list of tables in each
:param information_schema_table: `sqlalchemy.Table` instance used to construct clauses
For most SQL dbs it contains `table_name` and `table_schema` columns,
therefore it is expected the table has them defined.
:param uppercase_names: if True use schema and table names uppercase
"""
table_schema_column_name = information_schema_table.columns[ColumnIndex.SCHEMA].name
table_name_column_name = information_schema_table.columns[ColumnIndex.TABLE_NAME].name
try:
table_database_column_name = information_schema_table.columns[ColumnIndex.DATABASE].name
except IndexError:
table_database_column_name = ""
filter_clauses = []
for db, schema_mapping in mapping.items():
schema_level_clauses = []
for schema, tables in schema_mapping.items():
filter_clause: ColumnElement[bool] = information_schema_table.c[table_name_column_name].in_(
[name.upper() if uppercase_names else name for name in tables]
)
if schema:
schema_upper = schema.upper() if uppercase_names else schema
filter_clause = and_(
information_schema_table.c[table_schema_column_name] == schema_upper, filter_clause
)
schema_level_clauses.append(filter_clause)
if db and table_database_column_name:
db_upper = db.upper() if uppercase_names else db
filter_clause = and_(
information_schema_table.c[table_database_column_name] == db_upper, or_(*schema_level_clauses)
)
filter_clauses.append(filter_clause)
else:
filter_clauses.extend(schema_level_clauses)
return or_(*filter_clauses)
| TableSchema |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-agent-search-retriever/llama_index/packs/agent_search_retriever/base.py | {
"start": 2141,
"end": 2991
} | class ____(BaseLlamaPack):
"""AgentSearchRetrieverPack for running an agent-search retriever."""
def __init__(
self,
similarity_top_k: int = 2,
search_provider: str = "agent-search",
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> None:
self.retriever = AgentSearchRetriever(
search_provider=search_provider,
api_key=api_key,
api_base=api_base,
similarity_top_k=similarity_top_k,
)
super().__init__()
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"retriever": self.retriever,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self._retriever.retrieve(*args, **kwargs)
| AgentSearchRetrieverPack |
python | PyCQA__pylint | pylint/checkers/misc.py | {
"start": 1799,
"end": 7059
} | class ____(BaseTokenChecker, BaseRawFileChecker):
"""BaseChecker for encoding issues and fixme notes.
Checks for:
* warning notes in the code like FIXME, XXX
* encoding issues.
"""
# configuration section name
name = "miscellaneous"
msgs = {
"W0511": (
"%s",
"fixme",
"Used when a warning note as FIXME or XXX is detected.",
)
}
options = (
(
"notes",
{
"type": "csv",
"metavar": "<comma separated values>",
"default": ("FIXME", "XXX", "TODO"),
"help": (
"List of note tags to take in consideration, "
"separated by a comma."
),
},
),
(
"notes-rgx",
{
"type": "string",
"metavar": "<regexp>",
"help": "Regular expression of note tags to take in consideration.",
"default": "",
},
),
(
"check-fixme-in-docstring",
{
"type": "yn",
"metavar": "<y or n>",
"default": False,
"help": "Whether or not to search for fixme's in docstrings.",
},
),
)
def open(self) -> None:
super().open()
notes = "|".join(re.escape(note) for note in self.linter.config.notes)
if self.linter.config.notes_rgx:
notes += f"|{self.linter.config.notes_rgx}"
comment_regex = rf"#\s*(?P<msg>({notes})(?=(:|\s|\Z)).*?$)"
self._comment_fixme_pattern = re.compile(comment_regex, re.I)
# single line docstring like '''this''' or """this"""
docstring_regex = rf"((\"\"\")|(\'\'\'))\s*(?P<msg>({notes})(?=(:|\s|\Z)).*?)((\"\"\")|(\'\'\'))"
self._docstring_fixme_pattern = re.compile(docstring_regex, re.I)
# multiline docstrings which will be split into newlines
# so we do not need to look for quotes/double-quotes
multiline_docstring_regex = rf"^\s*(?P<msg>({notes})(?=(:|\s|\Z)).*$)"
self._multiline_docstring_fixme_pattern = re.compile(
multiline_docstring_regex, re.I
)
def _check_encoding(
self, lineno: int, line: bytes, file_encoding: str
) -> str | None:
try:
return line.decode(file_encoding)
except UnicodeDecodeError:
pass
except LookupError:
if (
line.startswith(b"#")
and "coding" in str(line)
and file_encoding in str(line)
):
msg = f"Cannot decode using encoding '{file_encoding}', bad encoding"
self.add_message("syntax-error", line=lineno, args=msg)
return None
def process_module(self, node: nodes.Module) -> None:
"""Inspect the source file to find encoding problem."""
encoding = node.file_encoding if node.file_encoding else "ascii"
with node.stream() as stream:
for lineno, line in enumerate(stream):
self._check_encoding(lineno + 1, line, encoding)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
"""Inspect the source to find fixme problems."""
if not self.linter.config.notes:
return
for token_info in tokens:
if token_info.type == tokenize.COMMENT:
if match := self._comment_fixme_pattern.match(token_info.string):
self.add_message(
"fixme",
col_offset=token_info.start[1] + 1,
args=match.group("msg"),
line=token_info.start[0],
)
elif self.linter.config.check_fixme_in_docstring:
if self._is_multiline_docstring(token_info):
docstring_lines = token_info.string.split("\n")
for line_no, line in enumerate(docstring_lines):
if match := self._multiline_docstring_fixme_pattern.match(line):
self.add_message(
"fixme",
col_offset=token_info.start[1] + 1,
args=match.group("msg"),
line=token_info.start[0] + line_no,
)
elif match := self._docstring_fixme_pattern.match(token_info.string):
self.add_message(
"fixme",
col_offset=token_info.start[1] + 1,
args=match.group("msg"),
line=token_info.start[0],
)
def _is_multiline_docstring(self, token_info: tokenize.TokenInfo) -> bool:
return (
token_info.type == tokenize.STRING
and (token_info.line.lstrip().startswith(('"""', "'''")))
and "\n" in token_info.line.rstrip()
)
def register(linter: PyLinter) -> None:
linter.register_checker(EncodingChecker(linter))
linter.register_checker(ByIdManagedMessagesChecker(linter))
| EncodingChecker |
python | prabhupant__python-ds | data_structures/hash/hash_table.py | {
"start": 75,
"end": 348
} | class ____:
def __init__(self):
self.hash_table =
def check_collision(self):
pass
def add_to_linked_list(self):
pass
def insert(self):
pass
def delete(self):
pass
def get(self):
pass
| HashTable |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 11671,
"end": 16415
} | class ____(PackageTemplate):
"""Provides appropriate overrides for python extensions"""
base_class_name = "PythonPackage"
package_class_import = "from spack_repo.builtin.build_systems.python import PythonPackage"
dependencies = """\
# FIXME: Only add the python/pip/wheel dependencies if you need specific versions
# or need to change the dependency type. Generic python/pip/wheel dependencies are
# added implicity by the PythonPackage base class.
# depends_on("python@2.X:2.Y,3.Z:", type=("build", "run"))
# depends_on("py-pip@X.Y:", type="build")
# depends_on("py-wheel@X.Y:", type="build")
# FIXME: Add a build backend, usually defined in pyproject.toml. If no such file
# exists, use setuptools.
# depends_on("py-setuptools", type="build")
# depends_on("py-hatchling", type="build")
# depends_on("py-flit-core", type="build")
# depends_on("py-poetry-core", type="build")
# FIXME: Add additional dependencies if required.
# depends_on("py-foo", type=("build", "run"))"""
body_def = """\
def config_settings(self, spec, prefix):
# FIXME: Add configuration settings to be passed to the build backend
# FIXME: If not needed, delete this function
settings = {}
return settings"""
def __init__(self, name, url, versions, languages: List[str]):
# If the user provided `--name py-numpy`, don't rename it py-py-numpy
if not name.startswith("py-"):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = "py-{0}".format(name)
# Simple PyPI URLs:
# https://<hostname>/packages/<type>/<first character of project>/<project>/<download file>
# e.g. https://pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://www.pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.python.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/source/n/numpy/numpy-1.19.4.zip
# PyPI URLs containing hash:
# https://<hostname>/packages/<two character hash>/<two character hash>/<longer hash>/<download file>
# e.g. https://pypi.io/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip#sha256=141ec3a3300ab89c7f2b0775289954d193cc8edb621ea05f99db9cb181530512
# PyPI URLs for wheels:
# https://pypi.io/packages/py3/a/azureml_core/azureml_core-1.11.0-py3-none-any.whl
# https://pypi.io/packages/py3/d/dotnetcore2/dotnetcore2-2.1.14-py3-none-macosx_10_9_x86_64.whl
# https://pypi.io/packages/py3/d/dotnetcore2/dotnetcore2-2.1.14-py3-none-manylinux1_x86_64.whl
# https://files.pythonhosted.org/packages/cp35.cp36.cp37.cp38.cp39/s/shiboken2/shiboken2-5.15.2-5.15.2-cp35.cp36.cp37.cp38.cp39-abi3-manylinux1_x86_64.whl
# https://files.pythonhosted.org/packages/f4/99/ad2ef1aeeb395ee2319bb981ea08dbbae878d30dd28ebf27e401430ae77a/azureml_core-1.36.0.post2-py3-none-any.whl#sha256=60bcad10b4380d78a8280deb7365de2c2cd66527aacdcb4a173f613876cbe739
match = re.search(r"(?:pypi|pythonhosted)[^/]+/packages" + "/([^/#]+)" * 4, url)
if match:
# PyPI URLs for wheels are too complicated, ignore them for now
# https://www.python.org/dev/peps/pep-0427/#file-name-convention
if not match.group(4).endswith(".whl"):
if len(match.group(2)) == 1:
# Simple PyPI URL
url = "/".join(match.group(3, 4))
else:
# PyPI URL containing hash
# Project name doesn't necessarily match download name, but it
# usually does, so this is the best we can do
project = parse_name(url)
url = "/".join([project, match.group(4)])
self.url_line = ' pypi = "{url}"'
else:
# Add a reminder about spack preferring PyPI URLs
self.url_line = (
"""
# FIXME: ensure the package is not available through PyPI. If it is,
# re-run `spack create --force` with the PyPI URL.
"""
+ self.url_line
)
super().__init__(name, url, versions, languages)
| PythonPackageTemplate |
python | ray-project__ray | rllib/algorithms/impala/impala_tf_policy.py | {
"start": 1195,
"end": 6181
} | class ____:
def __init__(
self,
actions,
actions_logp,
actions_entropy,
dones,
behaviour_action_logp,
behaviour_logits,
target_logits,
discount,
rewards,
values,
bootstrap_value,
dist_class,
model,
valid_mask,
config,
vf_loss_coeff=0.5,
entropy_coeff=0.01,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""Policy gradient loss with vtrace importance weighting.
VTraceLoss takes tensors of shape [T, B, ...], where `B` is the
batch_size. The reason we need to know `B` is for V-trace to properly
handle episode cut boundaries.
Args:
actions: An int|float32 tensor of shape [T, B, ACTION_SPACE].
actions_logp: A float32 tensor of shape [T, B].
actions_entropy: A float32 tensor of shape [T, B].
dones: A bool tensor of shape [T, B].
behaviour_action_logp: Tensor of shape [T, B].
behaviour_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
target_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
discount: A float32 scalar.
rewards: A float32 tensor of shape [T, B].
values: A float32 tensor of shape [T, B].
bootstrap_value: A float32 tensor of shape [B].
dist_class: action distribution class for logits.
valid_mask: A bool tensor of valid RNN input elements (#2992).
config: Algorithm config dict.
"""
# Compute vtrace on the CPU for better performance.
with tf.device("/cpu:0"):
self.vtrace_returns = vtrace.multi_from_logits(
behaviour_action_log_probs=behaviour_action_logp,
behaviour_policy_logits=behaviour_logits,
target_policy_logits=target_logits,
actions=tf.unstack(actions, axis=2),
discounts=tf.cast(~tf.cast(dones, tf.bool), tf.float32) * discount,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
dist_class=dist_class,
model=model,
clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold, tf.float32),
)
self.value_targets = self.vtrace_returns.vs
# The policy gradients loss.
masked_pi_loss = tf.boolean_mask(
actions_logp * self.vtrace_returns.pg_advantages, valid_mask
)
self.pi_loss = -tf.reduce_sum(masked_pi_loss)
self.mean_pi_loss = -tf.reduce_mean(masked_pi_loss)
# The baseline loss.
delta = tf.boolean_mask(values - self.vtrace_returns.vs, valid_mask)
delta_squarred = tf.math.square(delta)
self.vf_loss = 0.5 * tf.reduce_sum(delta_squarred)
self.mean_vf_loss = 0.5 * tf.reduce_mean(delta_squarred)
# The entropy loss.
masked_entropy = tf.boolean_mask(actions_entropy, valid_mask)
self.entropy = tf.reduce_sum(masked_entropy)
self.mean_entropy = tf.reduce_mean(masked_entropy)
# The summed weighted loss.
self.total_loss = self.pi_loss - self.entropy * entropy_coeff
# Optional vf loss (or in a separate term due to separate
# optimizers/networks).
self.loss_wo_vf = self.total_loss
if not config["_separate_vf_optimizer"]:
self.total_loss += self.vf_loss * vf_loss_coeff
def _make_time_major(policy, seq_lens, tensor):
"""Swaps batch and trajectory axis.
Args:
policy: Policy reference
seq_lens: Sequence lengths if recurrent or None
tensor: A tensor or list of tensors to reshape.
trajectory item.
Returns:
res: A tensor with swapped axes or a list of tensors with
swapped axes.
"""
if isinstance(tensor, list):
return [_make_time_major(policy, seq_lens, t) for t in tensor]
if policy.is_recurrent():
B = tf.shape(seq_lens)[0]
T = tf.shape(tensor)[0] // B
else:
# Important: chop the tensor into batches at known episode cut
# boundaries.
# TODO: (sven) this is kind of a hack and won't work for
# batch_mode=complete_episodes.
T = policy.config["rollout_fragment_length"]
B = tf.shape(tensor)[0] // T
rs = tf.reshape(tensor, tf.concat([[B, T], tf.shape(tensor)[1:]], axis=0))
# swap B and T axes
res = tf.transpose(rs, [1, 0] + list(range(2, 1 + int(tf.shape(tensor).shape[0]))))
return res
| VTraceLoss |
python | django__django | django/core/mail/utils.py | {
"start": 260,
"end": 506
} | class ____:
def __str__(self):
return self.get_fqdn()
def get_fqdn(self):
if not hasattr(self, "_fqdn"):
self._fqdn = punycode(socket.getfqdn())
return self._fqdn
DNS_NAME = CachedDnsName()
| CachedDnsName |
python | doocs__leetcode | solution/3200-3299/3263.Convert Doubly Linked List to Array I/Solution.py | {
"start": 171,
"end": 365
} | class ____:
def toArray(self, root: "Optional[Node]") -> List[int]:
ans = []
while root:
ans.append(root.val)
root = root.next
return ans
| Solution |
python | euske__pdfminer | pdfminer/pdfdevice.py | {
"start": 227,
"end": 1096
} | class ____:
def __init__(self, rsrcmgr):
self.rsrcmgr = rsrcmgr
self.ctm = None
return
def __repr__(self):
return '<PDFDevice>'
def close(self):
return
def set_ctm(self, ctm):
self.ctm = ctm
return
def begin_tag(self, tag, props=None):
return
def end_tag(self):
return
def do_tag(self, tag, props=None):
return
def begin_page(self, page, ctm):
return
def end_page(self, page):
return
def begin_figure(self, name, bbox, matrix):
return
def end_figure(self, name):
return
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
return
def render_image(self, name, stream):
return
def render_string(self, textstate, seq):
return
## PDFTextDevice
##
| PDFDevice |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/auth.py | {
"start": 629,
"end": 1537
} | class ____(TokenAuthenticator):
"""
Making Authenticator to be able to accept Header-Based authentication.
"""
def __init__(self, config: Mapping[str, Any]):
self.config = config
def get_auth_header(self) -> Mapping[str, Any]:
auth_header: str = "X-Shopify-Access-Token"
credentials: Dict = self.config.get("credentials", self.config.get("auth_method"))
auth_method: str = credentials.get("auth_method")
if auth_method in ["oauth2.0", "access_token"]:
access_token = credentials.get("access_token")
if access_token:
return {auth_header: access_token}
else:
raise MissingAccessTokenError
elif auth_method == "api_password":
return {auth_header: credentials.get("api_password")}
else:
raise NotImplementedAuth(auth_method)
| ShopifyAuthenticator |
python | django__django | tests/invalid_models_tests/test_deprecated_fields.py | {
"start": 244,
"end": 5683
} | class ____(SimpleTestCase):
def test_IPAddressField_deprecated(self):
class IPAddressModel(models.Model):
ip = models.IPAddressField()
model = IPAddressModel()
self.assertEqual(
model.check(),
[
checks.Error(
"IPAddressField has been removed except for support in "
"historical migrations.",
hint="Use GenericIPAddressField instead.",
obj=IPAddressModel._meta.get_field("ip"),
id="fields.E900",
)
],
)
def test_CommaSeparatedIntegerField_deprecated(self):
class CommaSeparatedIntegerModel(models.Model):
csi = models.CommaSeparatedIntegerField(max_length=64)
model = CommaSeparatedIntegerModel()
self.assertEqual(
model.check(),
[
checks.Error(
"CommaSeparatedIntegerField is removed except for support in "
"historical migrations.",
hint=(
"Use "
"CharField(validators=[validate_comma_separated_integer_list]) "
"instead."
),
obj=CommaSeparatedIntegerModel._meta.get_field("csi"),
id="fields.E901",
)
],
)
def test_nullbooleanfield_deprecated(self):
class NullBooleanFieldModel(models.Model):
nb = models.NullBooleanField()
model = NullBooleanFieldModel()
self.assertEqual(
model.check(),
[
checks.Error(
"NullBooleanField is removed except for support in historical "
"migrations.",
hint="Use BooleanField(null=True, blank=True) instead.",
obj=NullBooleanFieldModel._meta.get_field("nb"),
id="fields.E903",
),
],
)
@skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL")
def test_postgres_jsonfield_deprecated(self):
from django.contrib.postgres.fields import JSONField
class PostgresJSONFieldModel(models.Model):
field = JSONField()
self.assertEqual(
PostgresJSONFieldModel.check(),
[
checks.Error(
"django.contrib.postgres.fields.JSONField is removed except "
"for support in historical migrations.",
hint="Use django.db.models.JSONField instead.",
obj=PostgresJSONFieldModel._meta.get_field("field"),
id="fields.E904",
),
],
)
@skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL")
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
def test_postgres_ci_fields_deprecated(self):
from django.contrib.postgres.fields import (
ArrayField,
CICharField,
CIEmailField,
CITextField,
)
class PostgresCIFieldsModel(models.Model):
ci_char = CICharField(max_length=255)
ci_email = CIEmailField()
ci_text = CITextField()
array_ci_text = ArrayField(CITextField())
self.assertEqual(
PostgresCIFieldsModel.check(),
[
checks.Error(
"django.contrib.postgres.fields.CICharField is removed except for "
"support in historical migrations.",
hint=(
'Use CharField(db_collation="…") with a case-insensitive '
"non-deterministic collation instead."
),
obj=PostgresCIFieldsModel._meta.get_field("ci_char"),
id="fields.E905",
),
checks.Error(
"django.contrib.postgres.fields.CIEmailField is removed except for "
"support in historical migrations.",
hint=(
'Use EmailField(db_collation="…") with a case-insensitive '
"non-deterministic collation instead."
),
obj=PostgresCIFieldsModel._meta.get_field("ci_email"),
id="fields.E906",
),
checks.Error(
"django.contrib.postgres.fields.CITextField is removed except for "
"support in historical migrations.",
hint=(
'Use TextField(db_collation="…") with a case-insensitive '
"non-deterministic collation instead."
),
obj=PostgresCIFieldsModel._meta.get_field("ci_text"),
id="fields.E907",
),
checks.Error(
"Base field for array has errors:\n"
" django.contrib.postgres.fields.CITextField is removed except "
"for support in historical migrations. (fields.E907)",
obj=PostgresCIFieldsModel._meta.get_field("array_ci_text"),
id="postgres.E001",
),
],
)
| DeprecatedFieldsTests |
python | ray-project__ray | python/ray/data/_internal/execution/operators/map_transformer.py | {
"start": 911,
"end": 4354
} | class ____(ABC):
"""Represents a single transform function in a MapTransformer."""
def __init__(
self,
input_type: MapTransformFnDataType,
*,
is_udf: bool = False,
output_block_size_option: Optional[OutputBlockSizeOption] = None,
):
"""
Args:
input_type: Expected type of the input data.
is_udf: Whether this transformation is UDF or not.
output_block_size_option: (Optional) Output block size configuration.
"""
self._input_type = input_type
self._output_block_size_option = output_block_size_option
self._is_udf = is_udf
@abstractmethod
def _post_process(self, results: Iterable[MapTransformFnData]) -> Iterable[Block]:
pass
@abstractmethod
def _apply_transform(
self, ctx: TaskContext, inputs: Iterable[MapTransformFnData]
) -> Iterable[MapTransformFnData]:
pass
def _pre_process(self, blocks: Iterable[Block]) -> Iterable[MapTransformFnData]:
return blocks
def _shape_blocks(self, results: Iterable[MapTransformFnData]) -> Iterable[Block]:
buffer = BlockOutputBuffer(self._output_block_size_option)
# This method supports following modes of shaping of the output blocks:
#
# 1. Incremental: block is accumulated up to configured
# ``_output_block_size_option``
#
# 2. *Non-incremental* (aka 1 block in / 1 block out): when
# no ``OutputBlockSizeOption`` is provided this method will absorb
# the whole input sequence and produce single block as an output
#
if self._input_type == MapTransformFnDataType.Block:
append = buffer.add_block
elif self._input_type == MapTransformFnDataType.Batch:
append = buffer.add_batch
else:
assert self._input_type == MapTransformFnDataType.Row
append = buffer.add
# Iterate over input sequence appending results to the
# buffer, while yielding incrementally
for result in results:
append(result)
# Try yielding incrementally
while buffer.has_next():
yield buffer.next()
# Finalize buffer
buffer.finalize()
# Yield remaining blocks from it
while buffer.has_next():
yield buffer.next()
def __call__(
self,
blocks: Iterable[Block],
ctx: TaskContext,
) -> Iterable[Block]:
batches = self._pre_process(blocks)
results = self._apply_transform(ctx, batches)
yield from self._post_process(results)
@property
def output_block_size_option(self):
return self._output_block_size_option
def override_target_max_block_size(self, target_max_block_size: Optional[int]):
self._output_block_size_option = OutputBlockSizeOption.of(
target_max_block_size=target_max_block_size
)
@property
def target_max_block_size(self):
if self._output_block_size_option is None:
return None
else:
return self._output_block_size_option.target_max_block_size
@property
def target_num_rows_per_block(self):
if self._output_block_size_option is None:
return None
else:
return self._output_block_size_option.target_num_rows_per_block
| MapTransformFn |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/base.py | {
"start": 100571,
"end": 104782
} | class ____(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
The :class:`.NestedTransaction` object is created by calling the
:meth:`_engine.Connection.begin_nested` method of
:class:`_engine.Connection`.
When using :class:`.NestedTransaction`, the semantics of "begin" /
"commit" / "rollback" are as follows:
* the "begin" operation corresponds to the "BEGIN SAVEPOINT" command, where
the savepoint is given an explicit name that is part of the state
of this object.
* The :meth:`.NestedTransaction.commit` method corresponds to a
"RELEASE SAVEPOINT" operation, using the savepoint identifier associated
with this :class:`.NestedTransaction`.
* The :meth:`.NestedTransaction.rollback` method corresponds to a
"ROLLBACK TO SAVEPOINT" operation, using the savepoint identifier
associated with this :class:`.NestedTransaction`.
The rationale for mimicking the semantics of an outer transaction in
terms of savepoints so that code may deal with a "savepoint" transaction
and an "outer" transaction in an agnostic way.
.. seealso::
:ref:`session_begin_nested` - ORM version of the SAVEPOINT API.
"""
__slots__ = ("connection", "is_active", "_savepoint", "_previous_nested")
_savepoint: str
def __init__(self, connection: Connection):
assert connection._transaction is not None
if connection._trans_context_manager:
TransactionalContext._trans_ctx_check(connection)
self.connection = connection
self._savepoint = self.connection._savepoint_impl()
self.is_active = True
self._previous_nested = connection._nested_transaction
connection._nested_transaction = self
def _deactivate_from_connection(self, warn: bool = True) -> None:
if self.connection._nested_transaction is self:
self.connection._nested_transaction = self._previous_nested
elif warn:
util.warn(
"nested transaction already deassociated from connection"
)
@property
def _deactivated_from_connection(self) -> bool:
return self.connection._nested_transaction is not self
def _cancel(self) -> None:
# called by RootTransaction when the outer transaction is
# committed, rolled back, or closed to cancel all savepoints
# without any action being taken
self.is_active = False
self._deactivate_from_connection()
if self._previous_nested:
self._previous_nested._cancel()
def _close_impl(
self, deactivate_from_connection: bool, warn_already_deactive: bool
) -> None:
try:
if (
self.is_active
and self.connection._transaction
and self.connection._transaction.is_active
):
self.connection._rollback_to_savepoint_impl(self._savepoint)
finally:
self.is_active = False
if deactivate_from_connection:
self._deactivate_from_connection(warn=warn_already_deactive)
assert not self.is_active
if deactivate_from_connection:
assert self.connection._nested_transaction is not self
def _do_close(self) -> None:
self._close_impl(True, False)
def _do_rollback(self) -> None:
self._close_impl(True, True)
def _do_commit(self) -> None:
if self.is_active:
try:
self.connection._release_savepoint_impl(self._savepoint)
finally:
# nested trans becomes inactive on failed release
# unconditionally. this prevents it from trying to
# emit SQL when it rolls back.
self.is_active = False
# but only de-associate from connection if it succeeded
self._deactivate_from_connection()
else:
if self.connection._nested_transaction is self:
self.connection._invalid_transaction()
else:
raise exc.InvalidRequestError(
"This nested transaction is inactive"
)
| NestedTransaction |
python | django__django | tests/i18n/test_extraction.py | {
"start": 29758,
"end": 32972
} | class ____(ExtractorTests):
PO_FILE = "locale/%s/LC_MESSAGES/djangojs.po" % LOCALE
def test_javascript_literals(self):
_, po_contents = self._run_makemessages(domain="djangojs")
self.assertMsgId("This literal should be included.", po_contents)
self.assertMsgId("gettext_noop should, too.", po_contents)
self.assertMsgId("This one as well.", po_contents)
self.assertMsgId(r"He said, \"hello\".", po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId(
"/* but this one will be too */ 'cause there is no way of telling...",
po_contents,
)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
def test_media_static_dirs_ignored(self):
"""
Regression test for #23583.
"""
with override_settings(
STATIC_ROOT=os.path.join(self.test_dir, "static/"),
MEDIA_ROOT=os.path.join(self.test_dir, "media_root/"),
):
_, po_contents = self._run_makemessages(domain="djangojs")
self.assertMsgId(
"Static content inside app should be included.", po_contents
)
self.assertNotMsgId(
"Content from STATIC_ROOT should not be included", po_contents
)
@override_settings(STATIC_ROOT=None, MEDIA_ROOT="")
def test_default_root_settings(self):
"""
Regression test for #23717.
"""
_, po_contents = self._run_makemessages(domain="djangojs")
self.assertMsgId("Static content inside app should be included.", po_contents)
def test_i18n_catalog_ignored_when_invoked_for_django(self):
# Create target file so it exists in the filesystem and can be ignored.
# "invoked_for_django" is True when "conf/locale" folder exists.
os.makedirs(os.path.join("conf", "locale"))
i18n_catalog_js_dir = os.path.join(os.path.curdir, "views", "templates")
os.makedirs(i18n_catalog_js_dir)
open(os.path.join(i18n_catalog_js_dir, "i18n_catalog.js"), "w").close()
out, _ = self._run_makemessages(domain="djangojs")
self.assertIn(f"ignoring file i18n_catalog.js in {i18n_catalog_js_dir}", out)
def test_i18n_catalog_not_ignored_when_not_invoked_for_django(self):
# Create target file so it exists in the filesystem but is NOT ignored.
# "invoked_for_django" is False when "conf/locale" folder does not
# exist.
self.assertIs(os.path.exists(os.path.join("conf", "locale")), False)
i18n_catalog_js = os.path.join("views", "templates", "i18n_catalog.js")
os.makedirs(os.path.dirname(i18n_catalog_js))
open(i18n_catalog_js, "w").close()
out, _ = self._run_makemessages(domain="djangojs")
self.assertNotIn("ignoring file i18n_catalog.js", out)
| JavaScriptExtractorTests |
python | kamyu104__LeetCode-Solutions | Python/length-of-the-longest-increasing-path.py | {
"start": 108,
"end": 997
} | class ____(object):
def maxPathLength(self, coordinates, k):
"""
:type coordinates: List[List[int]]
:type k: int
:rtype: int
"""
def longest_increasing_subsequence(arr):
result = []
for x in arr:
i = bisect.bisect_left(result, x)
if i == len(result):
result.append(x)
else:
result[i] = x
return len(result)
target = coordinates[k]
coordinates.sort(key=lambda x: (x[0], -x[1]))
left, right = [], []
for x, y in coordinates:
if x < target[0] and y < target[1]:
left.append(y)
elif x > target[0] and y > target[1]:
right.append(y)
return longest_increasing_subsequence(left)+1+longest_increasing_subsequence(right)
| Solution |
python | google__python-fire | fire/test_components.py | {
"start": 3787,
"end": 4213
} | class ____:
"""Test class for testing Python Fire with a property with varargs."""
def cumsums(self, *items):
total = None
sums = []
for item in items:
if total is None:
total = item
else:
total += item
sums.append(total)
return sums
def varchars(self, alpha=0, beta=0, *chars): # pylint: disable=keyword-arg-before-vararg
return alpha, beta, ''.join(chars)
| VarArgs |
python | pydantic__pydantic | tests/test_discriminated_union.py | {
"start": 13891,
"end": 13931
} | class ____(int, Enum):
pass
| FooIntEnum |
python | python__mypy | mypyc/irbuild/for_helpers.py | {
"start": 37911,
"end": 38476
} | class ____(ForDictionaryCommon):
"""Generate optimized IR for a for loop over dictionary keys."""
dict_next_op = dict_next_key_op
dict_iter_op = dict_key_iter_op
def begin_body(self) -> None:
builder = self.builder
line = self.line
# Key is stored at the third place in the tuple.
key = builder.add(TupleGet(self.next_tuple, 2, line))
builder.assign(
builder.get_assignment_target(self.index),
builder.coerce(key, self.target_type, line),
line,
)
| ForDictionaryKeys |
python | ApeWorX__ape | src/ape/plugins/network.py | {
"start": 1813,
"end": 2835
} | class ____(PluginType):
"""
A plugin representing a network provider, which is the main API responsible
for making requests against a blockchain. Example provider plugins projects
include `ape-infura <https://github.com/ApeWorX/ape-infura>`__ as well as
`ape-alchemy <https://github.com/ApeWorX/ape-alchemy>`__.
"""
@hookspec
def providers( # type: ignore[empty-body]
self,
) -> Iterator[tuple[str, str, type["ProviderAPI"]]]:
"""
A hook that must return an iterator of tuples of:
* the target ecosystem plugin's name
* the network it works with (which must be valid network in the ecosystem)
* a :class:`ape.api.providers.ProviderAPI` subclass
Usage example::
@plugins.register(plugins.ProviderPlugin)
def providers():
yield "ethereum", "local", MyProvider
Returns:
Iterator[tuple[str, str, type[:class:`~ape.api.providers.ProviderAPI`]]]
"""
| ProviderPlugin |
python | sanic-org__sanic | sanic/http/stream.py | {
"start": 262,
"end": 689
} | class ____:
stage: Stage
response: Optional[BaseHTTPResponse]
protocol: HttpProtocol
url: Optional[str]
request_body: Optional[bytes]
request_max_size: Union[int, float]
__touchup__: tuple[str, ...] = tuple()
__slots__ = ("request_max_size",)
def respond(
self, response: BaseHTTPResponse
) -> BaseHTTPResponse: # no cov
raise NotImplementedError("Not implemented")
| Stream |
python | doocs__leetcode | solution/1400-1499/1482.Minimum Number of Days to Make m Bouquets/Solution.py | {
"start": 0,
"end": 460
} | class ____:
def minDays(self, bloomDay: List[int], m: int, k: int) -> int:
def check(days: int) -> int:
cnt = cur = 0
for x in bloomDay:
cur = cur + 1 if x <= days else 0
if cur == k:
cnt += 1
cur = 0
return cnt >= m
mx = max(bloomDay)
l = bisect_left(range(mx + 2), True, key=check)
return -1 if l > mx else l
| Solution |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 30593,
"end": 30658
} | class ____(ExprNode):
__slots__ = ("left", "op", "right")
| BinOp |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 22244,
"end": 22726
} | class ____(PointEvent):
''' Announce the start of a pinch event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'pinchstart'
| PinchStart |
python | pytorch__pytorch | torch/ao/quantization/__init__.py | {
"start": 6359,
"end": 7613
} | class ____(ObserverBase):
r"""This observer is used to describe an observer whose quantization parameters
are derived from other observers
"""
def __init__(
self,
dtype: torch.dtype,
obs_or_fqs: list[ObserverOrFakeQuantize],
derive_qparams_fn: Callable[
[list[ObserverOrFakeQuantize]], tuple[Tensor, Tensor]
],
quant_min: int | None = None,
quant_max: int | None = None,
qscheme: torch.qscheme | None = None,
ch_axis: int | None = None,
):
super().__init__(dtype)
self.obs_or_fqs = obs_or_fqs
self.derive_qparams_fn = derive_qparams_fn
self.quant_min = quant_min
self.quant_max = quant_max
self.qscheme = qscheme
self.ch_axis = ch_axis
from .utils import is_per_channel
if is_per_channel(self.qscheme):
if self.ch_axis is None:
raise AssertionError(
"Must provide a valid ch_axis if qscheme is per channel"
)
def forward(self, x: Tensor) -> Tensor:
return x
def calculate_qparams(self): # type:ignore[override]
return self.derive_qparams_fn(self.obs_or_fqs)
| _DerivedObserverOrFakeQuantize |
python | huggingface__transformers | src/transformers/models/mvp/modeling_mvp.py | {
"start": 10588,
"end": 13652
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: MvpConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MvpAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
self_attn_prompt: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
`(2, encoder_attention_heads, pro_len, head_dim)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
attn_prompt=self_attn_prompt,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states, attn_weights
| MvpEncoderLayer |
python | pytorch__pytorch | torch/_inductor/cache.py | {
"start": 1015,
"end": 1813
} | class ____(ABC, Generic[Key, Value]):
"""
Abstract base class for cache implementations.
Provides the interface for cache operations.
"""
@abstractmethod
def get(self: Self, key: Key) -> Value | None:
"""
Retrieve a value from the cache.
Args:
key (Key): The key to look up.
Returns:
Value | None: The cached value if present, else None.
"""
@abstractmethod
def insert(self: Self, key: Key, value: Value) -> bool:
"""
Insert a value into the cache.
Args:
key (Key): The key to insert.
value (Value): The value to associate with the key.
Returns:
bool: True if the value was inserted, False if the key already exists.
"""
| Cache |
python | paramiko__paramiko | paramiko/ssh_exception.py | {
"start": 2735,
"end": 3181
} | class ____(SSHException):
"""
Exception raised when an attempt to open a new `.Channel` fails.
:param int code: the error code returned by the server
.. versionadded:: 1.6
"""
def __init__(self, code, text):
SSHException.__init__(self, code, text)
self.code = code
self.text = text
def __str__(self):
return "ChannelException({!r}, {!r})".format(self.code, self.text)
| ChannelException |
python | lepture__authlib | authlib/oauth2/rfc7009/revocation.py | {
"start": 217,
"end": 4171
} | class ____(TokenEndpoint):
"""Implementation of revocation endpoint which is described in
`RFC7009`_.
.. _RFC7009: https://tools.ietf.org/html/rfc7009
"""
#: Endpoint name to be registered
ENDPOINT_NAME = "revocation"
def authenticate_token(self, request, client):
"""The client constructs the request by including the following
parameters using the "application/x-www-form-urlencoded" format in
the HTTP request entity-body:
token
REQUIRED. The token that the client wants to get revoked.
token_type_hint
OPTIONAL. A hint about the type of the token submitted for
revocation.
"""
self.check_params(request, client)
token = self.query_token(
request.form["token"], request.form.get("token_type_hint")
)
if token and not token.check_client(client):
raise InvalidGrantError()
return token
def check_params(self, request, client):
if "token" not in request.form:
raise InvalidRequestError()
hint = request.form.get("token_type_hint")
if hint and hint not in self.SUPPORTED_TOKEN_TYPES:
raise UnsupportedTokenTypeError()
def create_endpoint_response(self, request):
"""Validate revocation request and create the response for revocation.
For example, a client may request the revocation of a refresh token
with the following request::
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
:returns: (status_code, body, headers)
"""
# The authorization server first validates the client credentials
client = self.authenticate_endpoint_client(request)
# then verifies whether the token was issued to the client making
# the revocation request
token = self.authenticate_token(request, client)
# the authorization server invalidates the token
if token:
self.revoke_token(token, request)
self.server.send_signal(
"after_revoke_token",
token=token,
client=client,
)
return 200, {}, default_json_headers
def query_token(self, token_string, token_type_hint):
"""Get the token from database/storage by the given token string.
Developers should implement this method::
def query_token(self, token_string, token_type_hint):
if token_type_hint == 'access_token':
return Token.query_by_access_token(token_string)
if token_type_hint == 'refresh_token':
return Token.query_by_refresh_token(token_string)
return Token.query_by_access_token(token_string) or \
Token.query_by_refresh_token(token_string)
"""
raise NotImplementedError()
def revoke_token(self, token, request):
"""Mark token as revoked. Since token MUST be unique, it would be
dangerous to delete it. Consider this situation:
1. Jane obtained a token XYZ
2. Jane revoked (deleted) token XYZ
3. Bob generated a new token XYZ
4. Jane can use XYZ to access Bob's resource
It would be secure to mark a token as revoked::
def revoke_token(self, token, request):
hint = request.form.get("token_type_hint")
if hint == "access_token":
token.access_token_revoked = True
else:
token.access_token_revoked = True
token.refresh_token_revoked = True
token.save()
"""
raise NotImplementedError()
| RevocationEndpoint |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 29759,
"end": 36494
} | class ____(django_test.TestCase):
def setUp(self):
self.handlers = mock.MagicMock()
signals.pre_init.connect(self.handlers.pre_init)
signals.pre_save.connect(self.handlers.pre_save)
signals.post_save.connect(self.handlers.post_save)
def tearDown(self):
signals.pre_init.disconnect(self.handlers.pre_init)
signals.pre_save.disconnect(self.handlers.pre_save)
signals.post_save.disconnect(self.handlers.post_save)
def assertSignalsReactivated(self):
WithSignalsFactory()
self.assertEqual(self.handlers.pre_save.call_count, 1)
self.assertEqual(self.handlers.post_save.call_count, 1)
def test_context_manager(self):
with factory.django.mute_signals(signals.pre_save, signals.post_save):
WithSignalsFactory()
self.assertEqual(self.handlers.pre_init.call_count, 1)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
def test_receiver_created_during_model_instantiation_is_not_lost(self):
with factory.django.mute_signals(signals.post_save):
instance = WithSignalsFactory(post_save_signal_receiver=self.handlers.created_during_instantiation)
self.assertTrue(self.handlers.created_during_instantiation.called)
self.handlers.created_during_instantiation.reset_mock()
instance.save()
self.assertTrue(self.handlers.created_during_instantiation.called)
def test_signal_receiver_order_restored_after_mute_signals(self):
def must_be_first(*args, **kwargs):
self.handlers.do_stuff(1)
def must_be_second(*args, **kwargs):
self.handlers.do_stuff(2)
signals.post_save.connect(must_be_first)
with factory.django.mute_signals(signals.post_save):
WithSignalsFactory(post_save_signal_receiver=must_be_second)
self.assertEqual(self.handlers.do_stuff.call_args_list, [mock.call(2)])
self.handlers.reset_mock()
WithSignalsFactory(post_save_signal_receiver=must_be_second)
self.assertEqual(self.handlers.do_stuff.call_args_list, [mock.call(1), mock.call(2)])
def test_signal_cache(self):
with factory.django.mute_signals(signals.pre_save, signals.post_save):
signals.post_save.connect(self.handlers.mute_block_receiver)
WithSignalsFactory()
self.assertTrue(self.handlers.mute_block_receiver.call_count, 1)
self.assertEqual(self.handlers.pre_init.call_count, 1)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
self.assertTrue(self.handlers.mute_block_receiver.call_count, 1)
def test_class_decorator(self):
@factory.django.mute_signals(signals.pre_save, signals.post_save)
class WithSignalsDecoratedFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.WithSignals
WithSignalsDecoratedFactory()
self.assertEqual(self.handlers.pre_init.call_count, 1)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
def test_class_decorator_with_subfactory(self):
@factory.django.mute_signals(signals.pre_save, signals.post_save)
class WithSignalsDecoratedFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.WithSignals
skip_postgeneration_save = True
@factory.post_generation
def post(obj, create, extracted, **kwargs):
if not extracted:
WithSignalsDecoratedFactory.create(post=42)
# This will disable the signals (twice), create two objects,
# and reactivate the signals.
WithSignalsDecoratedFactory()
self.assertEqual(self.handlers.pre_init.call_count, 2)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
def test_class_decorator_related_model_with_post_hook(self):
"""
Related factory with post_generation hook should not call disabled signals.
Refs https://github.com/FactoryBoy/factory_boy/issues/424
"""
@factory.django.mute_signals(signals.post_save)
class PointedFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.PointedModel
skip_postgeneration_save = True
@factory.post_generation
def post_action(obj, create, extracted, **kwargs):
pass
class PointerFactory(factory.django.DjangoModelFactory):
pointed = factory.SubFactory(PointedFactory)
class Meta:
model = models.PointerModel
PointerFactory.create()
self.handlers.post_save.assert_called_once_with(
signal=mock.ANY,
sender=models.PointerModel,
instance=mock.ANY,
created=True,
update_fields=None,
raw=False,
using="default",
)
def test_class_decorator_build(self):
@factory.django.mute_signals(signals.pre_save, signals.post_save)
class WithSignalsDecoratedFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.WithSignals
WithSignalsDecoratedFactory.build()
self.assertEqual(self.handlers.pre_init.call_count, 1)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
def test_function_decorator(self):
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def foo():
WithSignalsFactory()
foo()
self.assertEqual(self.handlers.pre_init.call_count, 1)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
def test_classmethod_decorator(self):
class Foo:
@classmethod
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def generate(cls):
WithSignalsFactory()
Foo.generate()
self.assertEqual(self.handlers.pre_init.call_count, 1)
self.assertFalse(self.handlers.pre_save.called)
self.assertFalse(self.handlers.post_save.called)
self.assertSignalsReactivated()
| PreventSignalsTestCase |
python | walkccc__LeetCode | solutions/1765. Map of Highest Peak/1765.py | {
"start": 0,
"end": 666
} | class ____:
def highestPeak(self, isWater: list[list[int]]) -> list[list[int]]:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(isWater)
n = len(isWater[0])
ans = [[-1] * n for _ in range(m)]
q = collections.deque()
for i in range(m):
for j in range(n):
if isWater[i][j] == 1:
q.append((i, j))
ans[i][j] = 0
while q:
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if ans[x][y] != -1:
continue
ans[x][y] = ans[i][j] + 1
q.append((x, y))
return ans
| Solution |
python | has2k1__plotnine | plotnine/geoms/geom_boxplot.py | {
"start": 883,
"end": 9869
} | class ____(geom):
"""
Box and whiskers plot
{usage}
Parameters
----------
{common_parameters}
width : float, default=None
Box width. If `None`{.py}, the width is set to
`90%` of the resolution of the data. Note that if the stat
has a width parameter, that takes precedence over this one.
outlier_alpha : float, default=1
Transparency of the outlier points.
outlier_color : str | tuple, default=None
Color of the outlier points.
outlier_shape : str, default="o"
Shape of the outlier points. An empty string hides the outliers.
outlier_size : float, default=1.5
Size of the outlier points.
outlier_stroke : float, default=0.5
Stroke-size of the outlier points.
notch : bool, default=False
Whether the boxes should have a notch.
varwidth : bool, default=False
If `True`{.py}, boxes are drawn with widths proportional to
the square-roots of the number of observations in the
groups.
notchwidth : float, default=0.5
Width of notch relative to the body width.
fatten : float, default=2
A multiplicative factor used to increase the size of the
middle bar across the box.
See Also
--------
plotnine.stat_boxplot : The default `stat` for this `geom`.
"""
DEFAULT_AES = {
"alpha": 1,
"color": "#333333",
"fill": "white",
"linetype": "solid",
"shape": "o",
"size": 0.5,
"weight": 1,
}
REQUIRED_AES = {"x", "lower", "upper", "middle", "ymin", "ymax"}
DEFAULT_PARAMS = {
"stat": "boxplot",
"position": "dodge2",
"na_rm": False,
"width": None,
"outlier_alpha": 1,
"outlier_color": None,
"outlier_shape": "o",
"outlier_size": 1.5,
"outlier_stroke": 0.5,
"notch": False,
"varwidth": False,
"notchwidth": 0.5,
"fatten": 2,
}
legend_key_size = staticmethod(geom_crossbar.legend_key_size)
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
_position = kwargs.get("position", self.DEFAULT_PARAMS["position"])
varwidth = kwargs.get("varwidth", self.DEFAULT_PARAMS["varwidth"])
# varwidth = True is not compatible with preserve="total"
if varwidth:
if isinstance(_position, str):
kwargs["position"] = position_dodge2(preserve="single")
elif (
isinstance(_position, position)
and _position.params["preserve"] == "total"
):
warn(
"Cannot preserve total widths when varwidth=True",
PlotnineWarning,
stacklevel=2,
)
_position.params["preserve"] = "single"
super().__init__(mapping, data, **kwargs)
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
if "width" not in data:
width = self.params.get("width", None)
if width is not None:
data["width"] = width
else:
data["width"] = resolution(data["x"], False) * 0.9
if (
"outliers" not in data
# Remove outliers if they will not show so that the scale
# limits do not recognise them.
or self.params["outlier_shape"] in (None, "")
):
data["outliers"] = [[] for i in range(len(data))]
# min and max outlier values
omin = [
np.min(lst) if len(lst) else +np.inf for lst in data["outliers"]
]
omax = [
np.max(lst) if len(lst) else -np.inf for lst in data["outliers"]
]
data["ymin_final"] = np.min(
np.column_stack([data["ymin"], omin]), axis=1
)
data["ymax_final"] = np.max(
np.column_stack([data["ymax"], omax]), axis=1
)
# if varwidth not requested or not available, don't use it
if (
"varwidth" not in self.params
or not self.params["varwidth"]
or "relvarwidth" not in data
):
data["xmin"] = data["x"] - data["width"] / 2
data["xmax"] = data["x"] + data["width"] / 2
else:
# make relvarwidth relative to the size of the
# largest group
data["relvarwidth"] /= data["relvarwidth"].max()
data["xmin"] = data["x"] - data["relvarwidth"] * data["width"] / 2
data["xmax"] = data["x"] + data["relvarwidth"] * data["width"] / 2
del data["relvarwidth"]
del data["width"]
return data
@staticmethod
def draw_group(
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
params: dict[str, Any],
):
def flat(*args: pd.Series[Any]) -> npt.NDArray[Any]:
"""Flatten list-likes"""
return np.hstack(args)
common_columns = [
"color",
"size",
"linetype",
"fill",
"group",
"alpha",
"shape",
]
# whiskers
whiskers = pd.DataFrame(
{
"x": flat(data["x"], data["x"]),
"y": flat(data["upper"], data["lower"]),
"yend": flat(data["ymax"], data["ymin"]),
"alpha": 1,
}
)
whiskers["xend"] = whiskers["x"]
copy_missing_columns(whiskers, data[common_columns])
# box
box_columns = ["xmin", "xmax", "lower", "middle", "upper"]
box = data[common_columns + box_columns].copy()
box.rename(
columns={"lower": "ymin", "middle": "y", "upper": "ymax"},
inplace=True,
)
# notch
if params["notch"]:
box["ynotchlower"] = data["notchlower"]
box["ynotchupper"] = data["notchupper"]
# outliers
num_outliers = len(data["outliers"].iloc[0])
if num_outliers:
def outlier_value(param: str) -> Any:
oparam = f"outlier_{param}"
if params[oparam] is not None:
return params[oparam]
return data[param].iloc[0]
outliers = pd.DataFrame(
{
"y": data["outliers"].iloc[0],
"x": np.repeat(data["x"].iloc[0], num_outliers),
"fill": [None] * num_outliers,
}
)
outliers["alpha"] = outlier_value("alpha")
outliers["color"] = outlier_value("color")
outliers["shape"] = outlier_value("shape")
outliers["size"] = outlier_value("size")
outliers["stroke"] = outlier_value("stroke")
geom_point.draw_group(outliers, panel_params, coord, ax, params)
# plot
geom_segment.draw_group(whiskers, panel_params, coord, ax, params)
geom_crossbar.draw_group(box, panel_params, coord, ax, params)
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: layer
) -> DrawingArea:
"""
Draw a rectangle in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
# box
facecolor = to_rgba(data["fill"], data["alpha"])
if facecolor is None:
facecolor = "none"
kwargs = {"linestyle": data["linetype"]}
box = Rectangle(
(da.width * 0.125, da.height * 0.25),
width=da.width * 0.75,
height=da.height * 0.5,
facecolor=facecolor,
edgecolor=data["color"],
linewidth=data["size"],
capstyle="projecting",
antialiased=False,
**kwargs,
)
da.add_artist(box)
kwargs["solid_capstyle"] = "butt"
kwargs["color"] = data["color"]
kwargs["linewidth"] = data["size"] * SIZE_FACTOR
# middle strike through
strike = Line2D(
[da.width * 0.125, da.width * 0.875],
[da.height * 0.5, da.height * 0.5],
**kwargs,
)
da.add_artist(strike)
# whiskers
top = Line2D(
[da.width * 0.5, da.width * 0.5],
[da.height * 0.75, da.height * 0.9],
**kwargs,
)
da.add_artist(top)
bottom = Line2D(
[da.width * 0.5, da.width * 0.5],
[da.height * 0.25, da.height * 0.1],
**kwargs,
)
da.add_artist(bottom)
return da
| geom_boxplot |
python | python-poetry__poetry | src/poetry/packages/dependency_package.py | {
"start": 245,
"end": 1343
} | class ____:
def __init__(self, dependency: Dependency, package: Package) -> None:
self._dependency = dependency
self._package = package
@property
def dependency(self) -> Dependency:
return self._dependency
@property
def package(self) -> Package:
return self._package
def clone(self) -> DependencyPackage:
return self.__class__(self._dependency, self._package.clone())
def with_features(self, features: Iterable[str]) -> DependencyPackage:
return self.__class__(self._dependency, self._package.with_features(features))
def without_features(self) -> DependencyPackage:
return self.with_features([])
def __str__(self) -> str:
return str(self._package)
def __repr__(self) -> str:
return repr(self._package)
def __hash__(self) -> int:
return hash(self._package)
def __eq__(self, other: object) -> bool:
if isinstance(other, DependencyPackage):
other = other.package
equal: bool = self._package == other
return equal
| DependencyPackage |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 38110,
"end": 38243
} | class ____(Blockwise):
_parameters = ["frame"]
operation = M.dropna
_preserves_partitioning_information = True
| DropnaSeries |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/deps.py | {
"start": 852,
"end": 1728
} | class ____:
requirements_txt: str
python_version: version.Version
pex_flags: list[str]
@property
def hash(self) -> str:
# The hash uniquely identifies the list of requirements used to build a deps.pex.
# This is used as part of the cache key to reuse a cached deps.pex.
# Note requirements_txt may have floating dependencies, so this is not perfect and may
# reuse deps.pex even if a new PyPI package is published for a dependency.
# An easy workaround is to pin the dependency in setup.py.
normalized_pex_flags = sorted(set(self.pex_flags) - {"--resolve-local-platforms"})
return hashlib.sha1(
(
repr(self.requirements_txt) + str(self.python_version) + repr(normalized_pex_flags)
).encode("utf-8")
).hexdigest()
@dataclass(frozen=True)
| DepsRequirements |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 16239,
"end": 18386
} | class ____(Module):
"""General N dimensional adaptive downsampling to a target shape."""
target_shape: Sequence[int] = field(static=True)
operation: Callable[[Array], Array]
def __init__(
self,
target_shape: int | Sequence[int],
num_spatial_dims: int,
operation: Callable,
):
"""**Arguments:**
- `target_shape`: The target output shape.
- `num_spatial_dims`: The number of spatial dimensions.
- `operation`: The operation applied for downsample.
"""
self.operation = operation
if isinstance(target_shape, int):
self.target_shape = (target_shape,) * num_spatial_dims
elif (
isinstance(target_shape, Sequence) and len(target_shape) == num_spatial_dims
):
self.target_shape = target_shape
else:
raise ValueError(
"`target_size` must either be an int or tuple of length "
f"{num_spatial_dims} containing ints."
)
@named_scope("eqx.nn.AdaptivePool")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape
`(channels, dim_1, dim_2, ... )`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels,) + target_shape`.
"""
if x.ndim - 1 != len(self.target_shape):
raise ValueError(
f"Expected input with {len(self.target_shape)} dimensions, "
f"received {x.ndim - 1} instead."
)
for i in range(1, x.ndim):
op = jax.vmap(
_adaptive_pool1d, (0, None, None), 0
) # batching over channels by default
for j in range(1, x.ndim):
if i == j:
continue
op = jax.vmap(op, in_axes=(j, None, None), out_axes=j)
x = op(x, self.target_shape[i - 1], self.operation)
return x
| AdaptivePool |
python | wntrblm__nox | nox/command.py | {
"start": 1202,
"end": 6570
} | class ____(Exception):
"""Raised when an executed command returns a non-success status code."""
def __init__(self, reason: str | None = None) -> None:
super().__init__(reason)
self.reason = reason
def which(
program: str | os.PathLike[str], paths: Sequence[str | os.PathLike[str]] | None
) -> str:
"""Finds the full path to an executable."""
if paths is not None:
full_path = shutil.which(program, path=os.pathsep.join(str(p) for p in paths))
if full_path:
return os.fspath(full_path)
full_path = shutil.which(program)
if full_path:
return os.fspath(full_path)
logger.error(f"Program {program} not found.")
msg = f"Program {program} not found"
raise CommandFailed(msg)
def _clean_env(env: Mapping[str, str | None] | None = None) -> dict[str, str] | None:
if env is None:
return None
clean_env = {k: v for k, v in env.items() if v is not None}
# Ensure systemroot is passed down, otherwise Windows will explode.
if _PLATFORM.startswith("win"):
clean_env.setdefault("SYSTEMROOT", os.environ.get("SYSTEMROOT", ""))
return clean_env
def _shlex_join(args: Sequence[str | os.PathLike[str]]) -> str:
return " ".join(shlex.quote(os.fspath(arg)) for arg in args)
@overload
def run(
args: Sequence[str | os.PathLike[str]],
*,
env: Mapping[str, str | None] | None = ...,
silent: Literal[True],
paths: Sequence[str | os.PathLike[str]] | None = ...,
success_codes: Iterable[int] | None = ...,
log: bool = ...,
external: ExternalType = ...,
stdout: int | IO[str] | None = ...,
stderr: int | IO[str] | None = ...,
interrupt_timeout: float | None = ...,
terminate_timeout: float | None = ...,
) -> str: ...
@overload
def run(
args: Sequence[str | os.PathLike[str]],
*,
env: Mapping[str, str | None] | None = ...,
silent: Literal[False] = ...,
paths: Sequence[str | os.PathLike[str]] | None = ...,
success_codes: Iterable[int] | None = ...,
log: bool = ...,
external: ExternalType = ...,
stdout: int | IO[str] | None = ...,
stderr: int | IO[str] | None = ...,
interrupt_timeout: float | None = ...,
terminate_timeout: float | None = ...,
) -> bool: ...
@overload
def run(
args: Sequence[str | os.PathLike[str]],
*,
env: Mapping[str, str | None] | None = ...,
silent: bool,
paths: Sequence[str | os.PathLike[str]] | None = ...,
success_codes: Iterable[int] | None = ...,
log: bool = ...,
external: ExternalType = ...,
stdout: int | IO[str] | None = ...,
stderr: int | IO[str] | None = ...,
interrupt_timeout: float | None = ...,
terminate_timeout: float | None = ...,
) -> str | bool: ...
def run(
args: Sequence[str | os.PathLike[str]],
*,
env: Mapping[str, str | None] | None = None,
silent: bool = False,
paths: Sequence[str | os.PathLike[str]] | None = None,
success_codes: Iterable[int] | None = None,
log: bool = True,
external: ExternalType = False,
stdout: int | IO[str] | None = None,
stderr: int | IO[str] | None = subprocess.STDOUT,
interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT,
terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT,
) -> str | bool:
"""Run a command-line program."""
if success_codes is None:
success_codes = [0]
cmd, args = args[0], args[1:]
full_cmd = f"{cmd} {_shlex_join(args)}"
cmd_path = which(os.fspath(cmd), paths)
str_args = [os.fspath(arg) for arg in args]
if log:
logger.info(full_cmd)
is_external_tool = paths is not None and not any(
cmd_path.startswith(str(path)) for path in paths
)
if is_external_tool:
if external == "error":
logger.error(
f"Error: {cmd} is not installed into the virtualenv, it is located"
f" at {cmd_path}. Pass external=True into run() to explicitly allow"
" this."
)
msg = "External program disallowed."
raise CommandFailed(msg)
if external is False:
logger.warning(
f"Warning: {cmd} is not installed into the virtualenv, it is"
f" located at {cmd_path}. This might cause issues! Pass"
" external=True into run() to silence this message."
)
env = _clean_env(env)
try:
return_code, output = popen(
[cmd_path, *str_args],
silent=silent,
env=env,
stdout=stdout,
stderr=stderr,
interrupt_timeout=interrupt_timeout,
terminate_timeout=terminate_timeout,
)
if return_code not in success_codes:
suffix = ":" if (silent and output) else ""
logger.error(
f"Command {full_cmd} failed with exit code {return_code}{suffix}"
)
if silent and output:
logger.error(output)
msg = f"Returned code {return_code}"
raise CommandFailed(msg)
if output:
logger.output(output)
except KeyboardInterrupt:
logger.error("Interrupted...")
raise
return output if silent else True
| CommandFailed |
python | has2k1__plotnine | plotnine/geoms/geom_spoke.py | {
"start": 204,
"end": 977
} | class ____(geom_segment):
"""
Line segment parameterised by location, direction and distance
{usage}
Parameters
----------
{common_parameters}
See Also
--------
plotnine.geom_segment : For documentation of extra
parameters.
"""
REQUIRED_AES = {"x", "y", "angle", "radius"}
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
try:
radius = data["radius"]
except KeyError:
radius = self.aes_params["radius"]
try:
angle = data["angle"]
except KeyError:
angle = self.aes_params["angle"]
data["xend"] = data["x"] + np.cos(angle) * radius
data["yend"] = data["y"] + np.sin(angle) * radius
return data
| geom_spoke |
python | pytorch__pytorch | torch/ao/quantization/fx/tracer.py | {
"start": 477,
"end": 1697
} | class ____(Tracer):
def __init__(
self, skipped_module_names: list[str], skipped_module_classes: list[Callable]
):
super().__init__()
self.skipped_module_names = skipped_module_names
self.skipped_module_classes = skipped_module_classes
# NB: initialized the module_type of top level module to None
# we are assuming people won't configure the model with the type of top level
# module here, since people can use "" for global config
# We can change this if there is a use case that configures
# qconfig using top level module type
self.scope = Scope("", None)
self.record_stack_traces = True
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
return (
(
(
m.__module__.startswith("torch.nn")
or m.__module__.startswith("torch.ao.nn")
)
and not isinstance(m, torch.nn.Sequential)
)
or module_qualified_name in self.skipped_module_names
or type(m) in self.skipped_module_classes
or isinstance(m, _FusedModule)
)
| QuantizationTracer |
python | streamlit__streamlit | lib/streamlit/elements/metric.py | {
"start": 1736,
"end": 1867
} | class ____:
color: MetricProto.MetricColor.ValueType
direction: MetricProto.MetricDirection.ValueType
| MetricColorAndDirection |
python | python-visualization__folium | folium/plugins/measure_control.py | {
"start": 161,
"end": 2541
} | class ____(JSCSSMixin, MacroElement):
"""Add a measurement widget on the map.
Parameters
----------
position: str, default 'topright'
Location of the widget.
primary_length_unit: str, default 'meters'
secondary_length_unit: str, default 'miles'
primary_area_unit: str, default 'sqmeters'
secondary_area_unit: str, default 'acres'
See https://github.com/ljagis/leaflet-measure for more information.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = new L.Control.Measure(
{{ this.options|tojavascript }});
{{this._parent.get_name()}}.addControl({{this.get_name()}});
// Workaround for using this plugin with Leaflet>=1.8.0
// https://github.com/ljagis/leaflet-measure/issues/171
L.Control.Measure.include({
_setCaptureMarkerIcon: function () {
// disable autopan
this._captureMarker.options.autoPanOnFocus = false;
// default function
this._captureMarker.setIcon(
L.divIcon({
iconSize: this._map.getSize().multiplyBy(2)
})
);
},
});
{% endmacro %}
"""
) # noqa
default_js = [
(
"leaflet_measure_js",
"https://cdn.jsdelivr.net/gh/ljagis/leaflet-measure@2.1.7/dist/leaflet-measure.min.js",
)
]
default_css = [
(
"leaflet_measure_css",
"https://cdn.jsdelivr.net/gh/ljagis/leaflet-measure@2.1.7/dist/leaflet-measure.min.css",
)
]
def __init__(
self,
position="topright",
primary_length_unit="meters",
secondary_length_unit="miles",
primary_area_unit="sqmeters",
secondary_area_unit="acres",
**kwargs
):
super().__init__()
self._name = "MeasureControl"
self.options = remove_empty(
position=position,
primary_length_unit=primary_length_unit,
secondary_length_unit=secondary_length_unit,
primary_area_unit=primary_area_unit,
secondary_area_unit=secondary_area_unit,
**kwargs
)
| MeasureControl |
python | numpy__numpy | numpy/random/tests/test_direct.py | {
"start": 13096,
"end": 14628
} | class ____(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
def test_advange_large(self):
rs = Generator(self.bit_generator(38219308213743))
pcg = rs.bit_generator
state = pcg.state["state"]
initial_state = 287608843259529770491897792873167516365
assert state["state"] == initial_state
pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
state = pcg.state["state"]
advanced_state = 135275564607035429730177404003164635391
assert state["state"] == advanced_state
| TestPCG64 |
python | sqlalchemy__sqlalchemy | test/orm/test_dataclasses.py | {
"start": 8844,
"end": 11790
} | class ____(
fixtures.DeclarativeMappedTest, DataclassesTest
):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
@declarative
@dataclasses.dataclass
class Widget:
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
widget_id = Column(Integer, primary_key=True)
account_id = Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__sa_dataclass_metadata_key__ = "sa"
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@declarative
@dataclasses.dataclass
class Account:
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
account_id: int = dataclasses.field(
metadata={"sa": Column(Integer, primary_key=True)},
)
widgets: List[Widget] = dataclasses.field(
default_factory=list, metadata={"sa": relationship("Widget")}
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
cls.classes.Account = Account
cls.classes.Widget = Widget
cls.classes.SpecialWidget = SpecialWidget
@classmethod
def setup_mappers(cls):
pass
@classmethod
def define_tables(cls, metadata):
pass
def test_asdict_and_astuple_widget(self):
Widget = self.classes.Widget
widget = Widget("Foo")
eq_(dataclasses.asdict(widget), {"name": "Foo"})
eq_(dataclasses.astuple(widget), ("Foo",))
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget("Bar", magic=True)
eq_(dataclasses.asdict(widget), {"name": "Bar", "magic": True})
eq_(dataclasses.astuple(widget), ("Bar", True))
| FieldEmbeddedDeclarativeDataclassesTest |
python | scikit-learn__scikit-learn | sklearn/utils/_set_output.py | {
"start": 5629,
"end": 10975
} | class ____:
def __init__(self):
self.adapters = {}
@property
def supported_outputs(self):
return {"default"} | set(self.adapters)
def register(self, adapter):
self.adapters[adapter.container_lib] = adapter
ADAPTERS_MANAGER = ContainerAdaptersManager()
ADAPTERS_MANAGER.register(PandasAdapter())
ADAPTERS_MANAGER.register(PolarsAdapter())
def _get_adapter_from_container(container):
"""Get the adapter that knows how to handle such container.
See :class:`sklearn.utils._set_output.ContainerAdapterProtocol` for more
details.
"""
module_name = container.__class__.__module__.split(".")[0]
try:
return ADAPTERS_MANAGER.adapters[module_name]
except KeyError as exc:
available_adapters = list(ADAPTERS_MANAGER.adapters.keys())
raise ValueError(
"The container does not have a registered adapter in scikit-learn. "
f"Available adapters are: {available_adapters} while the container "
f"provided is: {container!r}."
) from exc
def _get_container_adapter(method, estimator=None):
"""Get container adapter."""
dense_config = _get_output_config(method, estimator)["dense"]
try:
return ADAPTERS_MANAGER.adapters[dense_config]
except KeyError:
return None
def _get_output_config(method, estimator=None):
"""Get output config based on estimator and global configuration.
Parameters
----------
method : {"transform"}
Estimator's method for which the output container is looked up.
estimator : estimator instance or None
Estimator to get the output configuration from. If `None`, check global
configuration is used.
Returns
-------
config : dict
Dictionary with keys:
- "dense": specifies the dense container for `method`. This can be
`"default"` or `"pandas"`.
"""
est_sklearn_output_config = getattr(estimator, "_sklearn_output_config", {})
if method in est_sklearn_output_config:
dense_config = est_sklearn_output_config[method]
else:
dense_config = get_config()[f"{method}_output"]
supported_outputs = ADAPTERS_MANAGER.supported_outputs
if dense_config not in supported_outputs:
raise ValueError(
f"output config must be in {sorted(supported_outputs)}, got {dense_config}"
)
return {"dense": dense_config}
def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):
"""Wrap output with container based on an estimator's or global config.
Parameters
----------
method : {"transform"}
Estimator's method to get container output for.
data_to_wrap : {ndarray, dataframe}
Data to wrap with container.
original_input : {ndarray, dataframe}
Original input of function.
estimator : estimator instance
Estimator with to get the output configuration from.
Returns
-------
output : {ndarray, dataframe}
If the output config is "default" or the estimator is not configured
for wrapping return `data_to_wrap` unchanged.
If the output config is "pandas", return `data_to_wrap` as a pandas
DataFrame.
"""
output_config = _get_output_config(method, estimator)
if output_config["dense"] == "default" or not _auto_wrap_is_configured(estimator):
return data_to_wrap
dense_config = output_config["dense"]
if issparse(data_to_wrap):
raise ValueError(
"The transformer outputs a scipy sparse matrix. "
"Try to set the transformer output to a dense array or disable "
f"{dense_config.capitalize()} output with set_output(transform='default')."
)
adapter = ADAPTERS_MANAGER.adapters[dense_config]
return adapter.create_container(
data_to_wrap,
original_input,
columns=estimator.get_feature_names_out,
)
def _wrap_method_output(f, method):
"""Wrapper used by `_SetOutputMixin` to automatically wrap methods."""
@wraps(f)
def wrapped(self, X, *args, **kwargs):
data_to_wrap = f(self, X, *args, **kwargs)
if isinstance(data_to_wrap, tuple):
# only wrap the first output for cross decomposition
return_tuple = (
_wrap_data_with_container(method, data_to_wrap[0], X, self),
*data_to_wrap[1:],
)
# Support for namedtuples `_make` is a documented API for namedtuples:
# https://docs.python.org/3/library/collections.html#collections.somenamedtuple._make
if hasattr(type(data_to_wrap), "_make"):
return type(data_to_wrap)._make(return_tuple)
return return_tuple
return _wrap_data_with_container(method, data_to_wrap, X, self)
return wrapped
def _auto_wrap_is_configured(estimator):
"""Return True if estimator is configured for auto-wrapping the transform method.
`_SetOutputMixin` sets `_sklearn_auto_wrap_output_keys` to `set()` if auto wrapping
is manually disabled.
"""
auto_wrap_output_keys = getattr(estimator, "_sklearn_auto_wrap_output_keys", set())
return (
hasattr(estimator, "get_feature_names_out")
and "transform" in auto_wrap_output_keys
)
| ContainerAdaptersManager |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 56612,
"end": 57948
} | class ____(BaseAPIIntegrationTest):
def test_container_cpu_shares(self):
cpu_shares = 512
container = self.client.create_container(
TEST_IMG, 'ls', host_config=self.client.create_host_config(
cpu_shares=cpu_shares
)
)
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
assert inspect_data['HostConfig']['CpuShares'] == 512
def test_container_cpuset(self):
cpuset_cpus = "0,1"
container = self.client.create_container(
TEST_IMG, 'ls', host_config=self.client.create_host_config(
cpuset_cpus=cpuset_cpus
)
)
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
assert inspect_data['HostConfig']['CpusetCpus'] == cpuset_cpus
@requires_api_version('1.25')
def test_create_with_runtime(self):
container = self.client.create_container(
TEST_IMG, ['echo', 'test'], runtime='runc'
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
assert config['HostConfig']['Runtime'] == 'runc'
| ContainerCPUTest |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_user_teams.py | {
"start": 49,
"end": 2406
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-user-teams"
def setUp(self) -> None:
self.foo = self.create_user("foo@example.com")
self.bar = self.create_user("bar@example.com", is_superuser=True)
self.org = self.create_organization(owner=self.user)
self.team1 = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org)
self.team3 = self.create_team(organization=self.org)
self.project1 = self.create_project(teams=[self.team1])
self.project2 = self.create_project(teams=[self.team2])
self.create_member(organization=self.org, user=self.foo, teams=[self.team1, self.team2])
self.create_member(organization=self.org, user=self.bar, teams=[self.team2])
def test_simple(self) -> None:
self.login_as(user=self.foo)
response = self.get_success_response(self.org.slug)
# Verify that only teams that the user is a member of, are returned
assert len(response.data) == 2
# Sort teams so there is a guaranteed ordering
response.data.sort(key=lambda x: x["id"])
assert response.data[0]["id"] == str(self.team1.id)
assert response.data[0]["isMember"]
assert response.data[0]["projects"][0]["id"] == str(self.project1.id)
assert response.data[1]["id"] == str(self.team2.id)
assert response.data[1]["isMember"]
assert response.data[1]["projects"][0]["id"] == str(self.project2.id)
def test_super_user(self) -> None:
self.login_as(user=self.bar, superuser=True)
response = self.get_success_response(self.org.slug)
# Verify that all teams are returned
assert len(response.data) == 3
# Sort teams so there is a guaranteed ordering
response.data.sort(key=lambda x: x["id"])
assert response.data[0]["id"] == str(self.team1.id)
assert not response.data[0]["isMember"]
assert response.data[0]["projects"][0]["id"] == str(self.project1.id)
assert response.data[1]["id"] == str(self.team2.id)
assert response.data[1]["isMember"]
assert response.data[1]["projects"][0]["id"] == str(self.project2.id)
assert response.data[2]["id"] == str(self.team3.id)
assert not response.data[2]["isMember"]
| OrganizationUserTeamsTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1503042,
"end": 1505851
class TopoDataFormat(DataFormat):
    """
    TopoDataFormat schema wrapper.

    Describes how TopoJSON input data is loaded: ``feature`` or ``mesh``
    selects the TopoJSON object set to convert, and ``parse`` controls
    per-field type inference.

    Parameters
    ----------
    feature : str
        The name of the TopoJSON object set to convert to a GeoJSON feature
        collection — e.g. ``"countries"`` in a map of the world. One GeoJSON
        feature is generated per member of the set.
    mesh : str
        The name of the TopoJSON object set to convert to mesh. Unlike
        ``feature``, the corresponding geo data is returned as a single,
        unified mesh instance rather than individual GeoJSON features —
        useful for efficiently drawing borders or other geographic elements
        not associated with specific regions (countries, states, counties).
    parse : dict, :class:`Parse`, None
        If ``null``, disables type inference based on the spec, keeping only
        inference based on the data. Otherwise a mapping from field name to
        desired type: ``"number"``, ``"boolean"``, ``"date"``, or null (do
        not parse the field). For example, ``"parse": {"modified_on":
        "date"}`` parses the ``modified_on`` field of each record as a Date.
        ``"date"`` uses JavaScript's `Date.parse()
        <https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/parse>`__;
        explicit formats may be given with the `d3-time-format syntax
        <https://github.com/d3/d3-time-format#locale_format>`__, e.g.
        ``{foo: "date:'%m%d%Y'"}``, and similarly ``{foo: "utc:'%m%d%Y'"}``
        for `UTC time <https://vega.github.io/vega-lite/docs/timeunit.html#utc>`__.
    type : Literal['topojson']
        Type of input data: ``"json"``, ``"csv"``, ``"tsv"``, ``"dsv"``.
        **Default value:** determined by the extension of the file URL;
        falls back to ``"json"`` when no extension is detected.
    """

    _schema = {"$ref": "#/definitions/TopoDataFormat"}

    def __init__(
        self,
        feature: Optional[str] = Undefined,
        mesh: Optional[str] = Undefined,
        parse: Optional[SchemaBase | Map | None] = Undefined,
        type: Optional[Literal["topojson"]] = Undefined,
        **kwds,
    ):
        # All validation happens in DataFormat against _schema; this wrapper
        # only names the schema-defined properties for discoverability.
        known = dict(feature=feature, mesh=mesh, parse=parse, type=type)
        super().__init__(**known, **kwds)
| TopoDataFormat |
python | django-haystack__django-haystack | test_haystack/test_views.py | {
"start": 965,
"end": 5912
class ____(TestCase):
    """End-to-end tests for haystack's function-based ``SearchView``.

    ``setUp`` swaps the default connection's ``UnifiedIndex`` for a throwaway
    one built around two mock-model indexes and (re)indexes all ``MockModel``
    rows, so the view has exactly three known results to page through.
    """
    fixtures = ["base_data"]
    def setUp(self):
        super().setUp()
        # Stow.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections["default"]._index = self.ui
        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())
    def tearDown(self):
        # Put back the UnifiedIndex stowed in setUp().
        connections["default"]._index = self.old_unified_index
        super().tearDown()
    def test_search_no_query(self):
        response = self.client.get(reverse("haystack_search"))
        self.assertEqual(response.status_code, 200)
    def test_search_query(self):
        response = self.client.get(reverse("haystack_search"), {"q": "haystack"})
        self.assertEqual(response.status_code, 200)
        # The view exposes "page" in context (not Django's generic "page_obj").
        self.assertIn("page", response.context)
        self.assertNotIn("page_obj", response.context)
        self.assertEqual(len(response.context[-1]["page"].object_list), 3)
        self.assertEqual(
            response.context[-1]["page"].object_list[0].content_type(), "core.mockmodel"
        )
        self.assertEqual(response.context[-1]["page"].object_list[0].pk, "1")
    def test_invalid_page(self):
        # A page number far past the end of the result set must 404.
        response = self.client.get(
            reverse("haystack_search"), {"q": "haystack", "page": "165233"}
        )
        self.assertEqual(response.status_code, 404)
    def test_empty_results(self):
        sv = SearchView()
        sv.request = HttpRequest()
        sv.form = sv.build_form()
        # With no query, get_results() should fall back to an empty queryset.
        self.assertTrue(isinstance(sv.get_results(), EmptySearchQuerySet))
    def test_initial_data(self):
        sv = SearchView(form_class=InitialedSearchForm)
        sv.request = HttpRequest()
        form = sv.build_form()
        self.assertTrue(isinstance(form, InitialedSearchForm))
        self.assertEqual(form.fields["q"].initial, "Search for...")
        para = form.as_p()
        self.assertTrue('<label for="id_q">Search:</label>' in para)
        self.assertTrue('value="Search for..."' in para)
    def test_pagination(self):
        # Page numbers are 1-based: page 0 and the page past the end both 404.
        response = self.client.get(
            reverse("haystack_search"), {"q": "haystack", "page": 0}
        )
        self.assertEqual(response.status_code, 404)
        response = self.client.get(
            reverse("haystack_search"), {"q": "haystack", "page": 1}
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context[-1]["page"].object_list), 3)
        response = self.client.get(
            reverse("haystack_search"), {"q": "haystack", "page": 2}
        )
        self.assertEqual(response.status_code, 404)
    def test_thread_safety(self):
        exceptions = []
        def threaded_view(resp_queue, view, request):
            time.sleep(2)
            try:
                view(request)
                resp_queue.put(request.GET["name"])
            except Exception as e:
                exceptions.append(e)
                raise
        class ThreadedSearchView(SearchView):
            def __call__(self, request):
                print("Name: %s" % request.GET["name"])
                return super().__call__(request)
        view = search_view_factory(view_class=ThreadedSearchView)
        resp_queue = queue.Queue()
        request_1 = HttpRequest()
        request_1.GET = {"name": "foo"}
        request_2 = HttpRequest()
        request_2.GET = {"name": "bar"}
        th1 = Thread(target=threaded_view, args=(resp_queue, view, request_1))
        th2 = Thread(target=threaded_view, args=(resp_queue, view, request_2))
        th1.start()
        th2.start()
        th1.join()
        th2.join()
        # Each concurrent call must see its own request; equal results would
        # mean state leaked between the two view invocations.
        foo = resp_queue.get()
        bar = resp_queue.get()
        self.assertNotEqual(foo, bar)
    def test_spelling(self):
        # Stow.
        from django.conf import settings
        old = settings.HAYSTACK_CONNECTIONS["default"].get("INCLUDE_SPELLING", None)
        settings.HAYSTACK_CONNECTIONS["default"]["INCLUDE_SPELLING"] = True
        sv = SearchView()
        sv.query = "Nothing"
        sv.results = []
        sv.build_page = lambda: (None, None)
        sv.create_response()
        context = sv.get_context()
        self.assertIn(
            "suggestion",
            context,
            msg="Spelling suggestions should be present even if"
            " no results were returned",
        )
        self.assertEqual(context["suggestion"], None)
        # Restore
        settings.HAYSTACK_CONNECTIONS["default"]["INCLUDE_SPELLING"] = old
        if old is None:
            del settings.HAYSTACK_CONNECTIONS["default"]["INCLUDE_SPELLING"]
@override_settings(ROOT_URLCONF="test_haystack.results_per_page_urls")
| SearchViewTestCase |
python | getsentry__sentry | tests/sentry/buffer/test_base.py | {
"start": 519,
"end": 3654
class ____(TestCase):
    """Unit tests for the base ``Buffer``: task deferral and ``process()`` writes."""
    def setUp(self) -> None:
        create_default_projects()
        self.buf = Buffer()
    @mock.patch("sentry.buffer.base.process_incr")
    def test_incr_delays_task(self, process_incr: mock.MagicMock) -> None:
        # incr() must not write directly; it defers to the process_incr task.
        model = Group
        columns = {"times_seen": 1}
        filters: dict[str, BufferField] = {"id": 1}
        self.buf.incr(model, columns, filters)
        kwargs = dict(
            model_name="sentry.group",
            columns=columns,
            filters=filters,
            extra=None,
            signal_only=None,
        )
        process_incr.apply_async.assert_called_once_with(kwargs=kwargs, headers=mock.ANY)
    def test_process_saves_data(self) -> None:
        # process() applies the counter delta to the matching row.
        group = Group.objects.create(project=Project(id=1))
        columns = {"times_seen": 1}
        filters = {"id": group.id, "project_id": 1}
        self.buf.process(Group, columns, filters)
        assert Group.objects.get(id=group.id).times_seen == group.times_seen + 1
    def test_process_saves_data_without_existing_row(self) -> None:
        # No pre-existing ReleaseProject row: process() should create one.
        columns = {"new_groups": 1}
        filters = {"project_id": self.project.id, "release_id": self.release.id}
        self.buf.process(ReleaseProject, columns, filters)
        assert ReleaseProject.objects.filter(new_groups=1, **filters).exists()
    def test_process_saves_extra(self) -> None:
        # The "extra" mapping sets non-counter fields alongside the increment.
        group = Group.objects.create(project=Project(id=1))
        columns = {"times_seen": 1}
        filters = {"id": group.id, "project_id": 1}
        the_date = timezone.now() + timedelta(days=5)
        self.buf.process(Group, columns, filters, {"last_seen": the_date})
        reload = Group.objects.get(id=group.id)
        assert reload.times_seen == group.times_seen + 1
        assert reload.last_seen == the_date
    def test_increments_when_null(self) -> None:
        org = Organization.objects.create(slug="test-org")
        team = Team.objects.create(organization=org, slug="test-team")
        project = Project.objects.create(organization=org, slug="test-project")
        project.add_team(team)
        release = Release.objects.create(organization=org, version="abcdefg")
        release_project = ReleaseProject.objects.create(project=project, release=release)
        assert release_project.new_groups == 0
        columns = {"new_groups": 1}
        filters = {"id": release_project.id}
        self.buf.process(ReleaseProject, columns, filters)
        release_project_ = ReleaseProject.objects.get(id=release_project.id)
        assert release_project_.new_groups == 1
    @mock.patch("sentry.models.Group.objects.create_or_update")
    def test_signal_only(self, create_or_update: mock.MagicMock) -> None:
        # signal_only=True must fire signals without mutating the row.
        group = Group.objects.create(project=Project(id=1))
        columns = {"times_seen": 1}
        filters = {"id": group.id, "project_id": 1}
        the_date = timezone.now() + timedelta(days=5)
        prev_times_seen = group.times_seen
        self.buf.process(Group, columns, filters, {"last_seen": the_date}, signal_only=True)
        group.refresh_from_db()
        assert group.times_seen == prev_times_seen
| BufferTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/queues.py | {
"start": 79962,
"end": 82567
class ____(Response):
    """
    Response of queues.update endpoint.

    :param updated: Number of queues updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """
    _service = "queues"
    _action = "update"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of queues updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }
    def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
        super(UpdateResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
    @schema_property("updated")
    def updated(self) -> Optional[int]:
        return self._property_updated
    @updated.setter
    def updated(self, value: Optional[int]) -> None:
        if value is None:
            self._property_updated = None
            return
        # Whole-number floats (e.g. decoded from JSON) are coerced to int
        # before the type assertion below.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
    @schema_property("fields")
    def fields(self) -> Optional[dict]:
        return self._property_fields
    @fields.setter
    def fields(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
# Maps each request type of the queues service to its response type; used to
# deserialize API replies into the matching Response subclass.
response_mapping = {
    GetByIdRequest: GetByIdResponse,
    GetAllRequest: GetAllResponse,
    GetDefaultRequest: GetDefaultResponse,
    CreateRequest: CreateResponse,
    UpdateRequest: UpdateResponse,
    DeleteRequest: DeleteResponse,
    AddTaskRequest: AddTaskResponse,
    GetNextTaskRequest: GetNextTaskResponse,
    RemoveTaskRequest: RemoveTaskResponse,
    MoveTaskForwardRequest: MoveTaskForwardResponse,
    MoveTaskBackwardRequest: MoveTaskBackwardResponse,
    MoveTaskToFrontRequest: MoveTaskToFrontResponse,
    MoveTaskToBackRequest: MoveTaskToBackResponse,
    GetQueueMetricsRequest: GetQueueMetricsResponse,
    AddOrUpdateMetadataRequest: AddOrUpdateMetadataResponse,
    DeleteMetadataRequest: DeleteMetadataResponse,
}
| UpdateResponse |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 26443,
"end": 27900
class ____(TestSet):
    """Re-runs the TestSet suite with a ``set`` subclass as the type under test."""
    thetype = SetSubclass
    basetype = set
    def test_keywords_in_subclass(self):
        # NOTE(review): error_on_graph_break(False) presumably lets dynamo
        # fall back instead of erroring while tracing these class definitions
        # -- confirm against torch._dynamo docs.
        with torch._dynamo.error_on_graph_break(False):
            class subclass(set):
                pass
        # Positional construction works; keyword construction must not.
        u = subclass([1, 2])
        self.assertIs(type(u), subclass)
        self.assertEqual(set(u), {1, 2})
        with self.assertRaises(TypeError):
            subclass(sequence=())
        with torch._dynamo.error_on_graph_break(False):
            class subclass_with_init(set):
                def __init__(self, arg, newarg=None):
                    super().__init__(arg)
                    self.newarg = newarg
        # A subclass __init__ may accept keywords of its own.
        u = subclass_with_init([1, 2], newarg=3)
        self.assertIs(type(u), subclass_with_init)
        self.assertEqual(set(u), {1, 2})
        self.assertEqual(u.newarg, 3)
        with torch._dynamo.error_on_graph_break(False):
            class subclass_with_new(set):
                def __new__(cls, arg, newarg=None):
                    self = super().__new__(cls, arg)
                    self.newarg = newarg
                    return self
        u = subclass_with_new([1, 2])
        self.assertIs(type(u), subclass_with_new)
        self.assertEqual(set(u), {1, 2})
        self.assertIsNone(u.newarg)
        # disallow kwargs in __new__ only (https://bugs.python.org/issue43413#msg402000)
        with self.assertRaises(TypeError):
            subclass_with_new([1, 2], newarg=3)
| TestSetSubclass |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 36077,
"end": 36245
class TroubledModelOnTrainEpochStart(BoringModel):
    """BoringModel variant that deliberately crashes when epoch 1 begins."""

    def on_train_epoch_start(self):
        # Epoch 0 trains normally; the failure is injected at the start of
        # the second epoch so checkpoint/restart behavior can be tested.
        if self.current_epoch != 1:
            return
        raise RuntimeError("Trouble!")
| TroubledModelOnTrainEpochStart |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job.py | {
"start": 2582,
"end": 2896
class ____(str, Enum):
    """Async job statuses"""
    # Mixing in ``str`` lets members compare equal to the raw status strings
    # returned by the API (e.g. status == "Job Completed").
    # NOTE(review): values presumably mirror the Facebook async-report status
    # labels verbatim -- confirm against the Marketing API docs.
    COMPLETED = "Job Completed"
    FAILED = "Job Failed"
    SKIPPED = "Job Skipped"
    STARTED = "Job Started"
    RUNNING = "Job Running"
    NOT_STARTED = "Job Not Started"
# ------------------------------- base ---------------------------------------
| Status |
python | huggingface__transformers | tests/models/deepseek_vl/test_modeling_deepseek_vl.py | {
"start": 8954,
"end": 14784
class ____(unittest.TestCase):
    """Integration tests for DeepseekVL generation against pinned outputs.

    Each test downloads the 1.3b chat checkpoint, runs greedy decoding
    (do_sample=False) for 20 new tokens, and compares the decoded text
    byte-for-byte with a recorded expectation.
    """
    def setUp(self):
        self.model_id = "deepseek-community/deepseek-vl-1.3b-chat"
    def test_model_text_generation(self):
        # Single-image, single-prompt generation.
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "Describe this image."},
                ],
            }
        ]
        EXPECTED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:In the image, a majestic snow leopard is captured in a moment of tranquility. The snow leopard'  # fmt: skip
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )
    def test_model_text_generation_batched(self):
        # Two prompts padded into one batch; expectations are per-row.
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "Describe this image."},
                    ],
                }
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "What animal do you see in the image?"},
                    ],
                }
            ],
        ]
        EXPECTED_TEXT = [
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:In the image, a majestic snow leopard is captured in a moment of tranquility. The snow leopard",  # fmt: skip
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What animal do you see in the image?\n\nAssistant:I see a bear in the image.What is the significance of the color red in the",  # fmt: skip
        ]
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, padding=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, text)
    def test_model_text_generation_with_multi_image(self):
        # Two images interleaved with text inside a single user turn.
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's the difference between"},
                    {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                    {"type": "text", "text": " and "},
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
                    },
                ],
            }
        ]
        EXPECTED_TEXT = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What's the difference between and \n\nAssistant:The image is a photograph featuring two cats lying on a pink blanket. The cat on the left is"  # fmt: skip
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )
| DeepseekVLIntegrationTest |
python | google__jax | jax/experimental/mosaic/gpu/launch_context.py | {
"start": 1643,
"end": 2568
class MemRefTransform:
  """Interface for transformations applied to a memref and its index space.

  Concrete subclasses must implement every method below; the base class
  raises ``NotImplementedError`` for all of them.
  """

  _ABSTRACT_MSG = "Subclasses should override this method"

  def apply(self, ref: ir.Value) -> ir.Value:
    raise NotImplementedError(self._ABSTRACT_MSG)

  def transform_index(self, idx: Sequence[ir.Value]) -> tuple[ir.Value, ...]:
    raise NotImplementedError(self._ABSTRACT_MSG)

  def transform_shape(self, shape: Sequence[int]) -> tuple[int, ...]:
    raise NotImplementedError(self._ABSTRACT_MSG)

  def transform_strides(self, shape: Sequence[int]) -> tuple[int, ...]:
    raise NotImplementedError(self._ABSTRACT_MSG)

  def batch(self, leading_rank: int) -> 'MemRefTransform':
    """Returns a transform that accepts a ref with the extra `leading_rank` dims.

    The returned transform should leave the leading dimensions unchanged and
    only apply to the suffix of the shape.
    """
    raise NotImplementedError(self._ABSTRACT_MSG)
| MemRefTransform |
python | readthedocs__readthedocs.org | readthedocs/builds/managers.py | {
"start": 2813,
"end": 3155
class InternalVersionManager(VersionManager):
    """
    Version manager restricted to internal versions.

    Excludes pull request/merge request versions from all queries, leaving
    only BRANCH, TAG and UNKNOWN type Versions.
    """

    def get_queryset(self):
        base_qs = super().get_queryset()
        # EXTERNAL versions back pull/merge request builds; hide them here.
        return base_qs.exclude(type=EXTERNAL)
| InternalVersionManager |
python | PyCQA__pylint | doc/data/messages/m/method-hidden/good.py | {
"start": 0,
"end": 122
class Fruit:
    """A fruit, described by the vitamins it contains."""

    def __init__(self, vitamins):
        # Stored as given; no defensive copy is made.
        self.vitamins = vitamins

    def antioxidants(self):
        """Placeholder for antioxidant information; currently does nothing."""
| Fruit |
python | ray-project__ray | rllib/core/distribution/torch/torch_distribution.py | {
"start": 11924,
"end": 14036
class ____(Distribution):
    """The distribution that returns the input values directly.

    This is similar to DiagGaussian with standard deviation zero (thus only
    requiring the "mean" values as NN output).

    Note: ``logp`` always returns zero (the value is taken with probability
    one), while ``entropy`` and ``kl`` are not supported and raise
    ``RuntimeError``; ``rsample`` is not implemented.

    .. testcode::
        :skipif: True

        m = TorchDeterministic(loc=torch.tensor([0.0, 0.0]))
        m.sample(sample_shape=(2,))

    .. testoutput::

        tensor([[ 0.0, 0.0], [ 0.0, 0.0]])

    Args:
        loc: the deterministic value to return
    """

    @override(Distribution)
    def __init__(self, loc: "torch.Tensor") -> None:
        super().__init__()
        self.loc = loc

    @override(Distribution)
    def sample(
        self,
        *,
        sample_shape=None,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        device = self.loc.device
        dtype = self.loc.dtype
        # Broadcast loc across the requested sample shape (empty by default),
        # on loc's own device/dtype.
        shape = (
            sample_shape if sample_shape is not None else torch.Size()
        ) + self.loc.shape
        return torch.ones(shape, device=device, dtype=dtype) * self.loc

    def rsample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # No reparameterized sampling path is provided for this distribution.
        raise NotImplementedError

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        # Log-probability is identically zero (probability one); note the
        # `value` argument is ignored.
        return torch.zeros_like(self.loc)

    @override(Distribution)
    def entropy(self, **kwargs) -> TensorType:
        raise RuntimeError(f"`entropy()` not supported for {self.__class__.__name__}.")

    @override(Distribution)
    def kl(self, other: "Distribution", **kwargs) -> TensorType:
        raise RuntimeError(f"`kl()` not supported for {self.__class__.__name__}.")

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        # One NN output per element of the (Box) action space.
        assert isinstance(space, gym.spaces.Box)
        return int(np.prod(space.shape, dtype=np.int32))

    def to_deterministic(self) -> "TorchDeterministic":
        # Already deterministic; return self unchanged.
        return self
@DeveloperAPI
| TorchDeterministic |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_special_math_ops_test.py | {
"start": 11694,
"end": 22954
class ____(test.TestCase, parameterized.TestCase):
  """WeakTensor coverage for the Bessel special functions.

  Checks boundary values, even/odd symmetry, agreement with scipy.special
  (when available) over small and larger input ranges, and gradient
  accuracy, feeding WeakTensor inputs through `special_math_ops`.
  """
  @test_util.run_in_graph_and_eager_modes
  def test_besseli_boundary(self):
    # I0(0) = 1, I1(0) = 0 (and likewise for the exponentially scaled
    # variants); NaN inputs must propagate.
    self.assertAllClose(1., special_math_ops.bessel_i0(0.))
    self.assertAllClose(1., special_math_ops.bessel_i0e(0.))
    self.assertAllClose(0., special_math_ops.bessel_i1(0.))
    self.assertAllClose(0., special_math_ops.bessel_i1e(0.))
    self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_i0(np.nan))))
    self.assertTrue(
        np.isnan(self.evaluate(special_math_ops.bessel_i0e(np.nan))))
    self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_i1(np.nan))))
    self.assertTrue(
        np.isnan(self.evaluate(special_math_ops.bessel_i1e(np.nan))))
  @test_util.run_in_graph_and_eager_modes
  def test_besselj_boundary(self):
    # J0(0) = 1, J1(0) = 0; NaN inputs must propagate.
    self.assertAllClose(1., special_math_ops.bessel_j0(0.))
    self.assertAllClose(0., special_math_ops.bessel_j1(0.))
    self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_j0(np.nan))))
    self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_j1(np.nan))))
  @test_util.run_in_graph_and_eager_modes
  def test_besselk_boundary(self):
    # K diverges at 0, so all four variants must return inf there.
    self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k0(0.))))
    self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k0e(0.))))
    self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k1(0.))))
    self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k1e(0.))))
    self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_k0(np.nan))))
    self.assertTrue(
        np.isnan(self.evaluate(special_math_ops.bessel_k0e(np.nan))))
    self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_k1(np.nan))))
    self.assertTrue(
        np.isnan(self.evaluate(special_math_ops.bessel_k1e(np.nan))))
  @parameterized.parameters(np.float32, np.float64)
  def test_i0j0_even(self, dtype):
    # I0, I0e and J0 are even functions: f(x) == f(-x).
    x = _get_weak_tensor(
        np.random.uniform(-100.0, 100.0, size=int(1e4)).astype(dtype)
    )
    self.assertAllClose(
        self.evaluate(special_math_ops.bessel_i0(x)),
        self.evaluate(special_math_ops.bessel_i0(-x)))
    self.assertAllClose(
        self.evaluate(special_math_ops.bessel_i0e(x)),
        self.evaluate(special_math_ops.bessel_i0e(-x)))
    self.assertAllClose(
        self.evaluate(special_math_ops.bessel_j0(x)),
        self.evaluate(special_math_ops.bessel_j0(-x)))
  @parameterized.parameters(np.float32, np.float64)
  def test_i1j1_odd(self, dtype):
    # I1, I1e and J1 are odd functions: f(x) == -f(-x).
    x = _get_weak_tensor(
        np.random.uniform(-100.0, 100.0, size=int(1e4)).astype(dtype)
    )
    self.assertAllClose(
        self.evaluate(special_math_ops.bessel_i1(x)),
        self.evaluate(-special_math_ops.bessel_i1(-x)))
    self.assertAllClose(
        self.evaluate(special_math_ops.bessel_i1e(x)),
        self.evaluate(-special_math_ops.bessel_i1e(-x)))
    self.assertAllClose(
        self.evaluate(special_math_ops.bessel_j1(x)),
        self.evaluate(-special_math_ops.bessel_j1(-x)))
  @parameterized.parameters(np.float32, np.float64)
  def test_besseli_small(self, dtype):
    # Compare the WeakTensor path against the scipy reference on |x| <= 1.
    x = np.random.uniform(-1.0, 1.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.i0(x), self.evaluate(special_math_ops.bessel_i0(x_wt))
      )
      self.assertAllClose(
          special.i1(x), self.evaluate(special_math_ops.bessel_i1(x_wt))
      )
      self.assertAllClose(
          special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x_wt))
      )
      self.assertAllClose(
          special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_besselj_small(self, dtype):
    x = np.random.uniform(-1.0, 1.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.j0(x), self.evaluate(special_math_ops.bessel_j0(x_wt))
      )
      self.assertAllClose(
          special.j1(x), self.evaluate(special_math_ops.bessel_j1(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_besselk_small(self, dtype):
    # K is singular at 0, so the range starts at the dtype's epsilon.
    x = np.random.uniform(np.finfo(dtype).eps, 1.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.k0(x), self.evaluate(special_math_ops.bessel_k0(x_wt))
      )
      self.assertAllClose(
          special.k0e(x), self.evaluate(special_math_ops.bessel_k0e(x_wt))
      )
      self.assertAllClose(
          special.k1(x), self.evaluate(special_math_ops.bessel_k1(x_wt))
      )
      self.assertAllClose(
          special.k1e(x), self.evaluate(special_math_ops.bessel_k1e(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_bessely_small(self, dtype):
    x = np.random.uniform(np.finfo(dtype).eps, 1.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.y0(x), self.evaluate(special_math_ops.bessel_y0(x_wt))
      )
      self.assertAllClose(
          special.y1(x), self.evaluate(special_math_ops.bessel_y1(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_besseli_larger(self, dtype):
    # Only the exponentially scaled variants are compared here; the unscaled
    # I functions overflow quickly for large arguments.
    x = np.random.uniform(1.0, 20.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x_wt))
      )
      self.assertAllClose(
          special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_besselj_larger(self, dtype):
    x = np.random.uniform(1.0, 30.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.j0(x), self.evaluate(special_math_ops.bessel_j0(x_wt))
      )
      self.assertAllClose(
          special.j1(x), self.evaluate(special_math_ops.bessel_j1(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_besselk_larger(self, dtype):
    x = np.random.uniform(1.0, 30.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.k0(x), self.evaluate(special_math_ops.bessel_k0(x_wt))
      )
      self.assertAllClose(
          special.k0e(x), self.evaluate(special_math_ops.bessel_k0e(x_wt))
      )
      self.assertAllClose(
          special.k1(x), self.evaluate(special_math_ops.bessel_k1(x_wt))
      )
      self.assertAllClose(
          special.k1e(x), self.evaluate(special_math_ops.bessel_k1e(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  @parameterized.parameters(np.float32, np.float64)
  def test_bessely_larger(self, dtype):
    x = np.random.uniform(1.0, 30.0, size=int(1e4)).astype(dtype)
    x_wt = _get_weak_tensor(x)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.assertAllClose(
          special.y0(x), self.evaluate(special_math_ops.bessel_y0(x_wt))
      )
      self.assertAllClose(
          special.y1(x), self.evaluate(special_math_ops.bessel_y1(x_wt))
      )
    except ImportError as e:
      tf_logging.warn('Cannot test special functions: %s' % str(e))
  def test_besseli_gradient(self):
    # Analytic gradients must agree with finite differences.
    inputs = [_get_weak_tensor(np.random.uniform(-10.0, 10.0, size=int(1e2)))]
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_i0, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-3)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_i0e, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_i1, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-3)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_i1e, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
  def test_besselj_gradient(self):
    inputs = [_get_weak_tensor(np.random.uniform(-50.0, 50.0, size=int(1e2)))]
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_j0, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_j1, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
  def test_besselk_gradient(self):
    inputs = [_get_weak_tensor(np.random.uniform(1.0, 50.0, size=int(1e2)))]
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_k0, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_k0e, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_k1, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_k1e, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
  def test_bessely_gradient(self):
    inputs = [_get_weak_tensor(np.random.uniform(1.0, 50.0, size=int(1e2)))]
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_y0, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        special_math_ops.bessel_y1, inputs)
    self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
# TODO(b/291943949): Add WeakTensor support for Einsum.
if __name__ == '__main__':
  # Enable implicit WeakTensor dtype conversion for the whole module before
  # running the suite.
  ops.set_dtype_conversion_mode('all')
  test.main()
| BesselTest |
python | joke2k__faker | faker/providers/ssn/en_GB/__init__.py | {
"start": 68,
"end": 1303
class Provider(BaseProvider):
    """UK National Insurance number (NINO) and British VAT ID provider.

    Source: https://en.wikipedia.org/wiki/National_Insurance_number

    To avoid ever generating a real NINO, the prefix and suffix letters are
    kept static, using values reserved by HMRC (never to be issued); only
    the digit groups are randomised. Whitespace is for readability only
    (e.g. "QR 12 34 56 C" vs "QR123456C"), so a few common spacing styles
    are included for the sake of realism.
    """

    nino_formats: Tuple[str, ...] = (
        "ZZ ## ## ## T",
        "ZZ######T",
        "ZZ ###### T",
    )

    def ssn(self) -> str:
        """Return a fake UK National Insurance number."""
        chosen: str = self.random_element(self.nino_formats)
        return self.numerify(self.generator.parse(chosen))

    vat_id_formats: Tuple[str, ...] = (
        "GB### #### ##",
        "GB### #### ## ###",
        "GBGD###",
        "GBHA###",
    )

    def vat_id(self) -> str:
        """Return a random British VAT ID.

        http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
        """
        return self.bothify(self.random_element(self.vat_id_formats))
| Provider |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 15258,
"end": 15442
class ____(sgqlc.types.Enum):
    """
    GraphQL enum of merge states, with choices ``CONFLICTING``,
    ``MERGEABLE`` and ``UNKNOWN``; appears to be generated from the GitHub
    GraphQL schema.
    """
    __schema__ = graphql_schema
    __choices__ = ("CONFLICTING", "MERGEABLE", "UNKNOWN")
| MergeableState |
python | pypa__pip | src/pip/_internal/cli/spinners.py | {
"start": 809,
"end": 2650
class InteractiveSpinner(SpinnerInterface):
    """Spinner for interactive terminals.

    Writes ``<indent><message> ... `` once, then repeatedly overwrites a
    trailing status in place (via backspaces) until :meth:`finish` pins the
    final status and emits a newline. Updates are throttled through a
    ``RateLimiter`` so the animation does not flood the stream.
    """

    def __init__(
        self,
        message: str,
        file: IO[str] | None = None,
        spin_chars: str = SPINNER_CHARS,
        # Empirically, 8 updates/second looks nice
        min_update_interval_seconds: float = 1 / SPINS_PER_SECOND,
    ):
        self._message = message
        self._file = sys.stdout if file is None else file
        self._rate_limiter = RateLimiter(min_update_interval_seconds)
        self._finished = False
        self._spin_cycle = itertools.cycle(spin_chars)
        self._file.write(" " * get_indentation() + self._message + " ... ")
        # Width of the status text currently on screen (nothing yet).
        self._width = 0

    def _write(self, status: str) -> None:
        assert not self._finished
        # Rub out the previous status: backspace to its start, blank it with
        # spaces, then backspace again so the new text lands in its place.
        rubout = "\b" * self._width
        self._file.write(rubout + " " * self._width + rubout)
        self._file.write(status)
        self._width = len(status)
        self._file.flush()
        self._rate_limiter.reset()

    def spin(self) -> None:
        # Skip the frame when already finished or when throttled.
        if self._finished or not self._rate_limiter.ready():
            return
        self._write(next(self._spin_cycle))

    def finish(self, final_status: str) -> None:
        if self._finished:
            return
        self._write(final_status)
        self._file.write("\n")
        self._file.flush()
        self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
| InteractiveSpinner |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 18074,
"end": 27489
} | class ____(_RendezvousOpExecutor):
"""Execute rendezvous operations using a shared state.
Args:
node:
The node descriptor associated with the current rendezvous handler
instance.
state_holder:
The ``RendezvousStateHolder`` to use to sync the rendezvous state
with other nodes.
settings:
The rendezvous settings.
"""
_node: _NodeDesc
_state: _RendezvousState
_state_holder: _RendezvousStateHolder
_settings: RendezvousSettings
def __init__(
self,
node: _NodeDesc,
state_holder: _RendezvousStateHolder,
settings: RendezvousSettings,
) -> None:
self._node = node
self._state_holder = state_holder
self._settings = settings
def _record(self, message: str, node_state: NodeState = NodeState.RUNNING) -> None:
construct_and_record_rdzv_event(
name=f"{self.__class__.__name__}.{get_method_name()}",
run_id=self._settings.run_id,
message=message,
node_state=node_state,
hostname=self._node.addr,
pid=self._node.pid,
local_id=self._node.local_id,
)
def run(
self,
state_handler: Callable[[_RendezvousContext, float], _Action],
deadline: float,
update_deadline: Callable[[timedelta], float] | None = None,
) -> None:
"""See base class."""
action = None
while action != _Action.FINISH:
# Reads or writes the latest rendezvous state shared by all nodes in
# the rendezvous. Note that our local changes might get overridden
# by another node if that node synced its changes before us.
has_set = self._state_holder.sync()
if has_set is not None:
if has_set:
msg = (
f"The node '{self._node}' has successfully synced its local changes with "
f"other nodes in the rendezvous '{self._settings.run_id}'."
)
else:
msg = (
f"The node '{self._node}' has a stale state and failed to sync its local "
f"changes with other nodes in the rendezvous '{self._settings.run_id}'."
)
self._record(message=msg)
logger.debug(msg)
self._state = self._state_holder.state
ctx = _RendezvousContext(self._node, self._state, self._settings)
# Determine the next action to take based on the current state of
# the rendezvous.
action = state_handler(ctx, deadline)
if action == _Action.FINISH:
continue
if action == _Action.ERROR_CLOSED:
raise RendezvousClosedError
if action == _Action.ERROR_TIMEOUT:
raise RendezvousTimeoutError
if action == _Action.SYNC:
# Delay the execution by one second to avoid overloading the
# backend if we are asked to poll for state changes.
_delay(seconds=1)
else:
if action == _Action.KEEP_ALIVE:
self._keep_alive()
elif action == _Action.ADD_TO_PARTICIPANTS:
self._add_to_participants()
elif action == _Action.ADD_TO_WAIT_LIST:
self._add_to_wait_list()
elif action == _Action.ADD_TO_REDUNDANCY_LIST:
self._add_to_redundancy_list()
elif action == _Action.REMOVE_FROM_PARTICIPANTS:
self._remove_from_participants()
elif action == _Action.REMOVE_FROM_WAIT_LIST:
self._remove_from_wait_list()
elif action == _Action.REMOVE_FROM_REDUNDANCY_LIST:
self._remove_from_redundancy_list()
# update deadline since the node may participate in rendezvous process
if update_deadline:
deadline = update_deadline(self._settings.timeout.join)
elif action == _Action.MARK_RENDEZVOUS_COMPLETE:
self._mark_rendezvous_complete()
elif action == _Action.MARK_RENDEZVOUS_CLOSED:
self._mark_rendezvous_closed()
# Attempt to sync our changes back to other nodes.
self._state_holder.mark_dirty()
def _keep_alive(self) -> None:
msg = (
f"The node '{self._node}' updated its keep-alive heartbeat time for the rendezvous "
f"'{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
self._state.last_heartbeats[self._node] = datetime.now(timezone.utc)
def _add_to_participants(self) -> None:
msg = (
f"The node '{self._node}' added itself to the participants of round "
f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
state = self._state
try:
state.wait_list.remove(self._node)
except KeyError:
pass
# The ranks of the participants will be set once the rendezvous is
# complete.
state.participants[self._node] = 0
self._keep_alive()
if len(state.participants) == self._settings.min_nodes:
state.deadline = (
datetime.now(timezone.utc) + self._settings.timeout.last_call
)
if len(state.participants) == self._settings.max_nodes:
self._mark_rendezvous_complete()
def _add_to_wait_list(self) -> None:
msg = (
f"The node '{self._node}' added itself to the wait list of round "
f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
if self._node in self._state.redundancy_list:
self._state.redundancy_list.remove(self._node)
self._state.wait_list.add(self._node)
self._keep_alive()
def _add_to_redundancy_list(self) -> None:
msg = (
f"The node '{self._node}' added itself to the redundancy list of round "
f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
self._state.redundancy_list.add(self._node)
self._keep_alive()
def _remove_from_participants(self) -> None:
msg = (
f"The node '{self._node}' removed itself from the participants of round "
f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
state = self._state
del state.participants[self._node]
del state.last_heartbeats[self._node]
# Common epilogue shared with the sanitizer() function of
# _BackendRendezvousStateHolder.
_remove_participant_epilogue(state, self._settings)
def _remove_from_wait_list(self) -> None:
msg = (
f"The node '{self._node}' removed itself from the wait list of round "
f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
self._state.wait_list.remove(self._node)
del self._state.last_heartbeats[self._node]
def _remove_from_redundancy_list(self) -> None:
msg = (
f"The node '{self._node}' removed itself from the redundant list of round "
f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
)
self._record(message=msg)
logger.debug(msg)
self._state.redundancy_list.remove(self._node)
del self._state.last_heartbeats[self._node]
def _mark_rendezvous_complete(self) -> None:
msg = (
f"The node '{self._node}' marked round {self._state.round} of the rendezvous "
f"'{self._settings.run_id}' as complete. Pending sync."
)
self._record(message=msg, node_state=NodeState.SUCCEEDED)
logger.debug(msg)
state = self._state
state.complete = True
state.deadline = None
# Assign the ranks.
for rank, node in enumerate(sorted(state.participants)):
state.participants[node] = rank
def _mark_rendezvous_closed(self) -> None:
msg = (
f"The node '{self._node}' marked the rendezvous '{self._settings.run_id}' as closed. "
"Pending sync."
)
self._record(message=msg, node_state=NodeState.SUCCEEDED)
logger.debug(msg)
self._state.closed = True
def _should_keep_alive(ctx: _RendezvousContext) -> bool:
"""Determine whether a keep-alive heartbeat should be sent."""
try:
last_heartbeat = ctx.state.last_heartbeats[ctx.node]
except KeyError:
return False
return (
last_heartbeat <= datetime.now(timezone.utc) - ctx.settings.keep_alive_interval
)
| _DistributedRendezvousOpExecutor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_annotations/mypy_init_return.py | {
"start": 192,
"end": 259
} | class ____:
def __init__(self, foo) -> None:
...
# OK
| Foo |
python | getsentry__sentry | tests/sentry/models/test_releaseprojectenvironment.py | {
"start": 283,
"end": 3408
} | class ____(TestCase):
def setUp(self) -> None:
self.project = self.create_project(name="foo")
self.datetime_now = timezone.now()
self.release = Release.objects.create(
organization_id=self.project.organization_id, version="42"
)
self.release.add_project(self.project)
self.environment = Environment.objects.create(
organization_id=self.project.organization_id, name="prod"
)
def test_create(self) -> None:
release_project_env = ReleaseProjectEnvironment.get_or_create(
project=self.project,
release=self.release,
environment=self.environment,
datetime=self.datetime_now,
)
assert release_project_env.project_id == self.project.id
assert release_project_env.release_id == self.release.id
assert release_project_env.environment_id == self.environment.id
assert release_project_env.first_seen == self.datetime_now
assert release_project_env.last_seen == self.datetime_now
assert release_project_env.new_issues_count == 0
def test_updates_last_seen(self) -> None:
release_project_env = ReleaseProjectEnvironment.get_or_create(
project=self.project,
release=self.release,
environment=self.environment,
datetime=self.datetime_now,
)
assert release_project_env.project_id == self.project.id
assert release_project_env.release_id == self.release.id
assert release_project_env.environment_id == self.environment.id
datetime_next = self.datetime_now + timedelta(days=1)
release_project_env = ReleaseProjectEnvironment.get_or_create(
project=self.project,
release=self.release,
environment=self.environment,
datetime=datetime_next,
)
assert release_project_env.first_seen == self.datetime_now
assert release_project_env.last_seen == datetime_next
def test_no_update_too_close(self) -> None:
"""
Test ensures that ReleaseProjectEnvironment's last_seen is not updated if the next time
it is seen is too close to the last time it was seen.
"""
release_project_env = ReleaseProjectEnvironment.get_or_create(
project=self.project,
release=self.release,
environment=self.environment,
datetime=self.datetime_now,
)
assert release_project_env.project_id == self.project.id
assert release_project_env.release_id == self.release.id
assert release_project_env.environment_id == self.environment.id
datetime_next = self.datetime_now + timedelta(seconds=1)
release_project_env = ReleaseProjectEnvironment.get_or_create(
project=self.project,
release=self.release,
environment=self.environment,
datetime=datetime_next,
)
assert release_project_env.first_seen == self.datetime_now
assert release_project_env.last_seen == self.datetime_now
| GetOrCreateTest |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 37127,
"end": 37202
} | class ____(Interface):
"""Localizer for a specific language"""
| ILocalizer |
python | Netflix__metaflow | metaflow/cli_components/utils.py | {
"start": 4566,
"end": 6057
} | class ____(click.Group):
def __init__(self, *args, lazy_subcommands=None, **kwargs):
super().__init__(*args, **kwargs)
# lazy_subcommands is a list of strings in the form
# "{command} -> "{module-name}.{command-object-name}"
self.lazy_subcommands = lazy_subcommands or {}
self._lazy_loaded = {}
def list_commands(self, ctx):
base = super().list_commands(ctx)
lazy = sorted(self.lazy_subcommands.keys())
return base + lazy
def get_command(self, ctx, cmd_name):
if cmd_name in self.lazy_subcommands:
return self._lazy_load(cmd_name)
return super().get_command(ctx, cmd_name)
def _lazy_load(self, cmd_name):
if cmd_name in self._lazy_loaded:
return self._lazy_loaded[cmd_name]
import_path = self.lazy_subcommands[cmd_name]
modname, cmd = import_path.rsplit(".", 1)
# do the import
mod = importlib.import_module(modname)
# get the Command object from that module
cmd_object = getattr(mod, cmd)
# check the result to make debugging easier. note that wrapped BaseCommand
# can be functions
if not isinstance(cmd_object, click.BaseCommand):
raise ValueError(
f"Lazy loading of {import_path} failed by returning "
f"a non-command object {type(cmd_object)}"
)
self._lazy_loaded[cmd_name] = cmd_object
return cmd_object
| LazyGroup |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 17884,
"end": 23511
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertIsNone(spec1._shape.rank)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertIsNone(spec1._dense_shape_dtype)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, indexed_slices.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
tensor_lib.TensorSpec(None, dtypes.string),
tensor_lib.TensorSpec([None], dtypes.int64),
)),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
tensor_lib.TensorSpec(None, dtypes.string),
tensor_lib.TensorSpec([None], dtypes.int64),
tensor_lib.TensorSpec([None], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
tensor_lib.TensorSpec([None, 10, 15], dtypes.float32),
tensor_lib.TensorSpec([None], dtypes.int64),
tensor_lib.TensorSpec([3], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
indices_shape=[20]), (
tensor_lib.TensorSpec([20, 10, 15], dtypes.float32),
tensor_lib.TensorSpec([20], dtypes.int64),
tensor_lib.TensorSpec([3], dtypes.int32),
)),
])
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
"spec": indexed_slices.IndexedSlicesSpec(),
"values": [3.0, 5.0],
"indices": [5, 10]
},
{
"spec":
indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
"values": [3.0, 5.0],
"indices": [5, 10],
"dense_shape": [100]
},
])
def testToFromComponents(self, spec, indices, values, dense_shape=None):
x = indexed_slices.IndexedSlices(indices, values, dense_shape)
actual_components = spec._to_components(x)
if dense_shape is None:
self.assertAllTensorsEqual(actual_components, [indices, values])
else:
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(x.indices, st_reconstructed.indices)
self.assertAllEqual(x.values, st_reconstructed.values)
if dense_shape is None:
self.assertIsNone(st_reconstructed.dense_shape)
else:
self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIsNone(st2.dense_shape)
| IndexedSlicesSpecTest |
python | openai__openai-python | src/openai/types/realtime/output_audio_buffer_clear_event_param.py | {
"start": 232,
"end": 504
} | class ____(TypedDict, total=False):
type: Required[Literal["output_audio_buffer.clear"]]
"""The event type, must be `output_audio_buffer.clear`."""
event_id: str
"""The unique ID of the client event used for error handling."""
| OutputAudioBufferClearEventParam |
python | pypa__setuptools | setuptools/tests/test_setopt.py | {
"start": 61,
"end": 1351
} | class ____:
@staticmethod
def parse_config(filename):
parser = configparser.ConfigParser()
with open(filename, encoding='utf-8') as reader:
parser.read_file(reader)
return parser
@staticmethod
def write_text(file, content):
with open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
"""
When editing a file, non-ASCII characters encoded in
UTF-8 should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(str(config), '[names]\njaraco=джарако')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert parser.get('names', 'jaraco') == 'джарако'
assert parser.get('names', 'other') == 'yes'
def test_case_retained(self, tmpdir):
"""
When editing a file, case of keys should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(str(config), '[names]\nFoO=bAr')
setopt.edit_config(str(config), dict(names=dict(oTher='yes')))
actual = config.read_text(encoding='ascii')
assert 'FoO' in actual
assert 'oTher' in actual
| TestEdit |
python | google__jax | jax/_src/core.py | {
"start": 57911,
"end": 59118
} | class ____:
__slots__: list[str] = []
is_high = False
has_qdd = False
def to_tangent_aval(self):
raise NotImplementedError("must override")
# TODO(dougalm): deprecate this alias
def at_least_vspace(self):
return self.to_tangent_aval()
def __repr__(self):
try:
kv_pairs = (f'{k}={v}' for k, v in self.__dict__.items())
return '{}({})'.format(self.__class__.__name__, ','.join(kv_pairs))
except AttributeError:
return self.__class__.__name__
def update_weak_type(self, weak_type):
return self
def update_vma(self, vma):
return self
def strip_weak_type(self) -> AbstractValue:
return self.update_weak_type(False)
def normalize(self) -> AbstractValue:
return self.strip_weak_type()
def update(self, **kwargs):
raise NotImplementedError("must override")
def lo_ty(self):
return [self]
def lo_ty_qdd(self, qdd):
raise NotImplementedError("avals with qdd must override")
def str_short(self, short_dtypes=False, mesh_axis_types=False):
return str(self)
# For type signatures involving dynamic shapes, we use lists of abstract values
# which may contain (reverse) de Bruijn indices in their shapes.
| AbstractValue |
python | pytorch__pytorch | test/test_autocast.py | {
"start": 6534,
"end": 7016
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, x, w_t):
ctx.save_for_backward(x, w_t)
return torch.nn.functional.linear(x, w_t)
@staticmethod
def backward(ctx, grad_output):
x, w_t = ctx.saved_tensors
with torch.autocast(device_type="cuda"):
dL_dX = torch.matmul(grad_output, w_t)
dL_dW = torch.matmul(x.transpose(0, 1), grad_output).transpose(0, 1)
return dL_dX, dL_dW
| CustomLinear |
python | sympy__sympy | doc/ext/docscrape_sphinx.py | {
"start": 131,
"end": 8299
} | class ____(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**{}** : {}'.format(param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**{}** : {}'.format(param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
# Lines that are commented out are used to make the
# autosummary:: table. Since SymPy does not use the
# autosummary:: functionality, it is easiest to just comment it
# out.
# autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
# if param_obj and (pydoc.getdoc(param_obj) or not desc):
# # Referenced object has a docstring
# autosum += [" %s%s" % (prefix, param)]
# else:
others.append((param, param_type, desc))
# if autosum:
# out += ['.. autosummary::']
# if self.class_members_toctree:
# out += [' :toctree:']
# out += [''] + autosum
if others:
out += [r'.. tabularcolumns:: p{3cm}p{\dimexpr\linewidth-3cm-4\tabcolsep\relax}']
out += ['.. rst-class:: longtable']
maxlen_0 = max(3, max(len(x[0]) for x in others))
hdr = "="*maxlen_0 + " " + "="*10
fmt = '%%%ds %%s ' % (maxlen_0,)
out += ['', '', hdr]
for param, param_type, desc in others:
desc = " ".join(x.strip() for x in desc).strip()
if param_type:
desc = "({}) {}".format(param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super()._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' {}: {}'.format(section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.IGNORECASE)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns('Returns')
out += self._str_returns('Yields')
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_see_also(func_role)
out += self._str_references()
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
| SphinxDocString |
python | getsentry__sentry | tests/sentry/deletions/test_alert_rule_trigger_action.py | {
"start": 423,
"end": 1571
} | class ____(BaseWorkflowTest, HybridCloudTestMixin):
def test_simple(self) -> None:
incident = self.create_incident()
alert_rule_trigger_action = self.create_alert_rule_trigger_action()
notification_message = NotificationMessage(
message_identifier="s3iojewd90j23eqw",
incident=incident,
trigger_action=alert_rule_trigger_action,
)
notification_message.save()
action = self.create_action()
ActionAlertRuleTriggerAction.objects.create(
action=action, alert_rule_trigger_action_id=alert_rule_trigger_action.id
)
self.ScheduledDeletion.schedule(instance=alert_rule_trigger_action, days=0)
with self.tasks():
run_scheduled_deletions()
assert not AlertRuleTriggerAction.objects.filter(id=alert_rule_trigger_action.id).exists()
assert not NotificationMessage.objects.filter(id=notification_message.id).exists()
assert not ActionAlertRuleTriggerAction.objects.filter(
alert_rule_trigger_action_id=alert_rule_trigger_action.id
).exists()
| DeleteAlertRuleTriggerActionTest |
python | readthedocs__readthedocs.org | readthedocs/projects/exceptions.py | {
"start": 391,
"end": 479
} | class ____(BuildUserError):
FILE_NOT_FOUND = "project:file:not-found"
| UserFileNotFound |
python | readthedocs__readthedocs.org | readthedocs/builds/tests/test_tasks.py | {
"start": 4385,
"end": 12852
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.github_app_installation = get(
GitHubAppInstallation,
installation_id=1111,
target_id=1111,
target_type=GitHubAccountType.USER,
)
self.remote_repository = get(
RemoteRepository,
name="repo",
full_name="user/repo",
vcs_provider=GITHUB_APP,
github_app_installation=self.github_app_installation,
)
self.project = get(
Project,
name="My project",
slug="my-project",
users=[self.user],
remote_repository=self.remote_repository,
)
self.base_version = self.project.versions.get(slug=LATEST)
self.base_version.built = True
self.base_version.save()
self.base_version_build = get(
Build,
project=self.project,
version=self.base_version,
commit="1234abcd",
state=BUILD_STATE_FINISHED,
success=True,
)
self.current_version = get(
Version,
project=self.project,
verbose_name="1",
slug="1",
type=EXTERNAL,
active=True,
built=True,
)
self.current_version_build = get(
Build,
project=self.project,
version=self.current_version,
commit="5678abcd",
state=BUILD_STATE_FINISHED,
success=True,
)
@mock.patch.object(GitHubAppService, "post_comment")
@mock.patch("readthedocs.builds.reporting.get_diff")
def test_post_build_overview(self, get_diff, post_comment):
get_diff.return_value = FileTreeDiff(
current_version=self.current_version,
current_version_build=self.current_version_build,
base_version=self.base_version,
base_version_build=self.base_version_build,
files=[
("index.html", FileTreeDiffFileStatus.modified),
("changes.html", FileTreeDiffFileStatus.added),
("deleteme.html", FileTreeDiffFileStatus.deleted),
],
outdated=False,
)
post_build_overview(build_pk=self.current_version_build.pk)
expected_comment = dedent(
f"""
### Documentation build overview
> 📚 [My project](https://readthedocs.org/projects/my-project/) | 🛠️ Build [#{self.current_version_build.id}](https://readthedocs.org/projects/my-project/builds/{self.current_version_build.id}/) | 📁 Comparing 5678abcd against [latest](http://my-project.readthedocs.io/en/latest/) (1234abcd)
[<kbd><br />🔍 Preview build <br /></kbd>](http://my-project--1.readthedocs.build/en/1/)
<details>
<summary>Show files changed (3 files in total): 📝 1 modified | ➕ 1 added | ➖ 1 deleted</summary>
| File | Status |
| --- | --- |
| [changes.html](http://my-project--1.readthedocs.build/en/1/changes.html) | ➕ added |
| [deleteme.html](http://my-project--1.readthedocs.build/en/1/deleteme.html) | ➖ deleted |
| [index.html](http://my-project--1.readthedocs.build/en/1/index.html) | 📝 modified |
</details>
"""
)
post_comment.assert_called_once_with(
build=self.current_version_build,
comment=expected_comment,
create_new=True,
)
@mock.patch.object(GitHubAppService, "post_comment")
@mock.patch("readthedocs.builds.reporting.get_diff")
def test_post_build_overview_more_than_5_files(self, get_diff, post_comment):
get_diff.return_value = FileTreeDiff(
current_version=self.current_version,
current_version_build=self.current_version_build,
base_version=self.base_version,
base_version_build=self.base_version_build,
files=[
("index.html", FileTreeDiffFileStatus.modified),
("changes.html", FileTreeDiffFileStatus.added),
("deleteme.html", FileTreeDiffFileStatus.deleted),
("one.html", FileTreeDiffFileStatus.modified),
("three.html", FileTreeDiffFileStatus.modified),
("two.html", FileTreeDiffFileStatus.modified),
],
outdated=False,
)
post_build_overview(build_pk=self.current_version_build.pk)
expected_comment = dedent(
f"""
### Documentation build overview
> 📚 [My project](https://readthedocs.org/projects/my-project/) | 🛠️ Build [#{self.current_version_build.id}](https://readthedocs.org/projects/my-project/builds/{self.current_version_build.id}/) | 📁 Comparing 5678abcd against [latest](http://my-project.readthedocs.io/en/latest/) (1234abcd)
[<kbd><br />🔍 Preview build <br /></kbd>](http://my-project--1.readthedocs.build/en/1/)
<details>
<summary>Show files changed (6 files in total): 📝 4 modified | ➕ 1 added | ➖ 1 deleted</summary>
| File | Status |
| --- | --- |
| [changes.html](http://my-project--1.readthedocs.build/en/1/changes.html) | ➕ added |
| [deleteme.html](http://my-project--1.readthedocs.build/en/1/deleteme.html) | ➖ deleted |
| [index.html](http://my-project--1.readthedocs.build/en/1/index.html) | 📝 modified |
| [one.html](http://my-project--1.readthedocs.build/en/1/one.html) | 📝 modified |
| [three.html](http://my-project--1.readthedocs.build/en/1/three.html) | 📝 modified |
| [two.html](http://my-project--1.readthedocs.build/en/1/two.html) | 📝 modified |
</details>
"""
)
post_comment.assert_called_once_with(
build=self.current_version_build,
comment=expected_comment,
create_new=True,
)
@mock.patch.object(GitHubAppService, "post_comment")
@mock.patch("readthedocs.builds.reporting.get_diff")
def test_post_build_overview_no_files_changed(self, get_diff, post_comment):
get_diff.return_value = FileTreeDiff(
current_version=self.current_version,
current_version_build=self.current_version_build,
base_version=self.base_version,
base_version_build=self.base_version_build,
files=[],
outdated=False,
)
post_build_overview(build_pk=self.current_version_build.pk)
expected_comment = dedent(
f"""
### Documentation build overview
> 📚 [My project](https://readthedocs.org/projects/my-project/) | 🛠️ Build [#{self.current_version_build.id}](https://readthedocs.org/projects/my-project/builds/{self.current_version_build.id}/) | 📁 Comparing 5678abcd against [latest](http://my-project.readthedocs.io/en/latest/) (1234abcd)
[<kbd><br />🔍 Preview build <br /></kbd>](http://my-project--1.readthedocs.build/en/1/)
No files changed.
"""
)
post_comment.assert_called_once_with(
build=self.current_version_build,
comment=expected_comment,
create_new=False,
)
@mock.patch.object(GitHubAppService, "post_comment")
def test_post_build_overview_no_external_version(self, post_comment):
assert not self.base_version.is_external
post_build_overview(build_pk=self.base_version_build.pk)
post_comment.assert_not_called()
@mock.patch.object(GitHubAppService, "post_comment")
def test_post_build_overview_no_github_app_project(self, post_comment):
self.project.remote_repository = None
self.project.save()
assert not self.project.is_github_app_project
assert self.current_version.is_external
post_build_overview(build_pk=self.current_version_build.pk)
post_comment.assert_not_called()
@mock.patch.object(GitHubAppService, "post_comment")
@mock.patch("readthedocs.builds.reporting.get_diff")
def test_post_build_overview_no_diff_available(self, get_diff, post_comment):
get_diff.return_value = None
assert self.current_version.is_external
post_build_overview(build_pk=self.current_version_build.pk)
post_comment.assert_not_called()
| TestPostBuildOverview |
python | google__jax | tests/pallas/pallas_test.py | {
"start": 76611,
"end": 76717
} | class ____(PallasCallAutodifferentiationTest):
INTERPRET = True
| PallasCallAutodifferentiationInterpretTest |
python | falconry__falcon | tests/test_cookies.py | {
"start": 485,
"end": 1295
} | class ____:
def on_get(self, req, resp):
resp.set_cookie('foo', 'bar', domain='example.com', path='/')
def on_head(self, req, resp):
resp.set_cookie('foo', 'bar', max_age=300)
resp.set_cookie('bar', 'baz', http_only=False)
resp.set_cookie('bad', 'cookie')
resp.unset_cookie('bad')
def on_post(self, req, resp):
e = datetime(year=2050, month=1, day=1) # naive
resp.set_cookie('foo', 'bar', http_only=False, secure=False, expires=e)
resp.unset_cookie('bad')
def on_put(self, req, resp):
e = datetime(
year=2050, month=1, day=1, tzinfo=timezone(timedelta(hours=1))
) # aware
resp.set_cookie('foo', 'bar', http_only=False, secure=False, expires=e)
resp.unset_cookie('bad')
| CookieResource |
python | google__pytype | pytype/directors/directors.py | {
"start": 1192,
"end": 4208
} | class ____:
"""A set of line numbers.
The data structure is optimized to represent the union of a sparse set
of integers and ranges of non-negative integers. This supports the two styles
of directives: those after a statement apply only to that line and those on
their own line apply until countered by the opposing directive.
"""
def __init__(self):
# Map of line->bool for specific lines, takes precedence over _transitions.
self._lines = {}
# A sorted list of the lines at which the range state changes
# polarity. It is assumed to initially be false (not in a range).
# Even positions represent the start of a range, odd positions represent
# the end of a range. Thus [2, 5, 10, 12] would include lines 2, 3, 4, 10,
# and 11. If the length is odd, then an end of maxint is implied, thus
# [2, 5, 10] would disable lines 2, 3, 4, 10, 11, 12, ...
self._transitions = []
@property
def lines(self):
return self._lines
def set_line(self, line, membership):
"""Set whether a given line is a member of the set."""
self._lines[line] = membership
def start_range(self, line, membership):
"""Start a range of lines that are either included/excluded from the set.
Args:
line: A line number.
membership: If True, lines >= line are included in the set (starting a
range), otherwise they are excluded (ending a range).
Raises:
ValueError: if line is less than that of a previous call to start_range().
"""
last = self._transitions[-1] if self._transitions else -1
# Assert that lines are monotonically increasing. This simplifies the
# logic of adding new lines and ensures that _ranges is sorted.
if line < last:
raise ValueError("Line number less than previous start_range() call.")
# Determine previous membership state (True if the last range has an
# indefinite end).
previous = (len(self._transitions) % 2) == 1
if membership == previous:
return # Redundant with previous state, do nothing.
elif line == last:
# We have either enable/disable or disable/enable on the same line,
# cancel them out by popping the previous transition.
self._transitions.pop()
else:
# Normal case - add a transition at this line.
self._transitions.append(line)
def __contains__(self, line):
"""Return if a line is a member of the set."""
# First check for an entry in _lines.
specific = self._lines.get(line)
if specific is not None:
return specific
# Find the position in _ranges for line. The polarity of this position
# determines whether we are inside a range (odd) or outside (even).
pos = bisect.bisect(self._transitions, line)
return (pos % 2) == 1
def get_disable_after(self, line):
"""Get an unclosed disable, if any, that starts after line."""
if len(self._transitions) % 2 == 1 and self._transitions[-1] >= line:
return self._transitions[-1]
return None
| _LineSet |
python | bokeh__bokeh | src/bokeh/server/server.py | {
"start": 2940,
"end": 11232
} | class ____:
''' Explicitly coordinate the level Tornado components required to run a
Bokeh server:
* A Tornado ``IOLoop`` to run the Bokeh server machinery.
* a ``BokehTornado`` Tornado application that defines the Bokeh server
machinery.
* a Tornado ``HTTPServer`` to direct HTTP requests
All three of these components must be passed to ``BaseServer``, which will
initialize the ``BokehTornado`` instance on the ``io_loop``. The
``http_server`` must have been previously created and initialized with the
``BokehTornado`` instance.
'''
def __init__(self, io_loop: IOLoop, tornado_app: BokehTornado, http_server: HTTPServer) -> None:
''' Create a ``BaseServer`` instance.
Args:
io_loop (IOLoop) :
A Tornado ``IOLoop`` to run the Bokeh Tornado application on.
tornado_app (BokehTornado) :
An instance of the Bokeh Tornado application that generates
Bokeh Documents and Sessions.
http_server (HTTPServer) :
A Tornado ``HTTPServer`` to service HTTP requests for Bokeh
applications. Should have already be configured with the
``tornado_app`` when created.
'''
self._started = False
self._stopped = False
self._http = http_server
self._loop = io_loop
self._tornado = tornado_app
self._tornado.initialize(io_loop)
@property
def io_loop(self) -> IOLoop:
''' The Tornado ``IOLoop`` that this Bokeh Server is running on.
'''
return self._loop
def start(self) -> None:
''' Install the Bokeh Server and its background tasks on a Tornado
``IOLoop``.
This method does *not* block and does *not* affect the state of the
Tornado ``IOLoop`` You must start and stop the loop yourself, i.e.
this method is typically useful when you are already explicitly
managing an ``IOLoop`` yourself.
To start a Bokeh server and immediately "run forever" in a blocking
manner, see :func:`~bokeh.server.server.BaseServer.run_until_shutdown`.
'''
assert not self._started, "Already started"
self._started = True
self._tornado.start()
def stop(self, wait: bool = True) -> None:
''' Stop the Bokeh Server.
This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
as stops the ``HTTPServer`` that this instance was configured with.
Args:
wait (bool):
Whether to wait for orderly cleanup (default: True)
Returns:
None
'''
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
def unlisten(self) -> None:
''' Stop listening on ports. The server will no longer be usable after
calling this function.
.. note::
This function is mostly useful for tests
Returns:
None
'''
self._http.stop()
self.io_loop.add_callback(self._http.close_all_connections)
def run_until_shutdown(self) -> None:
''' Run the Bokeh Server until shutdown is requested by the user,
either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
Calling this method will start the Tornado ``IOLoop`` and block
all execution in the calling process.
Returns:
None
'''
if not self._started:
self.start()
# Install shutdown hooks
atexit.register(self._atexit)
signal.signal(signal.SIGTERM, self._sigterm)
try:
self._loop.start()
except KeyboardInterrupt:
print("\nInterrupted, shutting down")
self.stop()
def get_session(self, app_path: str, session_id: ID) -> ServerSession:
''' Get an active a session by name application path and session ID.
Args:
app_path (str) :
The configured application path for the application to return
a session for.
session_id (str) :
The session ID of the session to retrieve.
Returns:
ServerSession
'''
return self._tornado.get_session(app_path, session_id)
def get_sessions(self, app_path: str | None = None) -> list[ServerSession]:
''' Gets all currently active sessions for applications.
Args:
app_path (str, optional) :
The configured application path for the application to return
sessions for. If None, return active sessions for all
applications. (default: None)
Returns:
list[ServerSession]
'''
if app_path is not None:
return self._tornado.get_sessions(app_path)
all_sessions: list[ServerSession] = []
for path in self._tornado.app_paths:
all_sessions += self._tornado.get_sessions(path)
return all_sessions
def show(self, app_path: str, browser: str | None = None, new: BrowserTarget = "tab") -> None:
''' Opens an app in a browser window or tab.
This method is useful for testing or running Bokeh server applications
on a local machine but should not call when running Bokeh server for
an actual deployment.
Args:
app_path (str) : the app path to open
The part of the URL after the hostname:port, with leading slash.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the :doc:`webbrowser <python:library/webbrowser>`
module documentation in the standard lib for more details).
new (str, optional) : window or tab (default: "tab")
If ``new`` is 'tab', then opens a new tab.
If ``new`` is 'window', then opens a new window.
Returns:
None
'''
if not app_path.startswith("/"):
raise ValueError("app_path must start with a /")
address_string = 'localhost'
if self.address is not None and self.address != '':
address_string = self.address
url = f"http://{address_string}:{self.port}{self.prefix}{app_path}"
from bokeh.util.browser import view
view(url, browser=browser, new=new)
_atexit_ran = False
def _atexit(self) -> None:
if self._atexit_ran:
return
self._atexit_ran = True
log.debug("Shutdown: cleaning up")
if not self._stopped:
self.stop(wait=False)
def _sigterm(self, signum: int, frame: FrameType | None) -> None:
print(f"Received signal {signum}, shutting down")
# Tell self._loop.start() to return.
self._loop.add_callback_from_signal(self._loop.stop)
@property
def port(self) -> int | None:
''' The configured port number that the server listens on for HTTP requests
'''
sock = next(
sock for sock in self._http._sockets.values()
if sock.family in (socket.AF_INET, socket.AF_INET6)
)
return sock.getsockname()[1]
@property
def address(self) -> str | None:
''' The configured address that the server listens on for HTTP requests
'''
sock = next(
sock for sock in self._http._sockets.values()
if sock.family in (socket.AF_INET, socket.AF_INET6)
)
return sock.getsockname()[0]
@property
def prefix(self) -> str:
''' The configured URL prefix to use for all Bokeh server paths. '''
return self._tornado.prefix
@property
def index(self) -> str | None:
''' A path to a Jinja2 template to use for index at "/" '''
return self._tornado.index
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
| BaseServer |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 18755,
"end": 19173
} | class ____(ToolBase):
"""Base class for `ToolHome`, `ToolBack` and `ToolForward`."""
_on_trigger = None
def trigger(self, sender, event, data=None):
self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
getattr(self.toolmanager.get_tool(_views_positions),
self._on_trigger)()
self.toolmanager.get_tool(_views_positions).update_view()
| ViewsPositionsBase |
python | walkccc__LeetCode | solutions/3265. Count Almost Equal Pairs I/3265.py | {
"start": 0,
"end": 681
} | class ____:
def countPairs(self, nums: list[int]) -> int:
ans = 0
count = collections.Counter()
maxLen = len(str(max(nums)))
for num in nums:
digits = list(str(num).zfill(maxLen))
for swap in self._getSwaps(digits):
ans += count[swap]
count[num] += 1
return ans
def _getSwaps(self, digits: str) -> set[int]:
"""Returns all possible numbers after 1 swap."""
n = len(digits)
swaps = set([int(''.join(digits))])
for i, j in itertools.combinations(range(n), 2):
newDigits = digits[:]
newDigits[i], newDigits[j] = newDigits[j], newDigits[i]
swaps.add(int(''.join(newDigits)))
return swaps
| Solution |
python | ApeWorX__ape | src/ape/managers/project.py | {
"start": 66075,
"end": 78500
} | class ____(ProjectManager):
"""
Base class for projects. Projects can come from either
manifests or local source-paths.
"""
def __init__(self, manifest: PackageManifest, config_override: Optional[dict] = None):
self._manifest = manifest
self._config_override = config_override or {}
@log_instead_of_fail(default="<ProjectManager>")
def __repr__(self) -> str:
name = f" {self.project_id}"
# NOTE: 'Project' is meta for 'ProjectManager' (mixin magic).
return f"<ProjectManager{name}>"
@only_raise_attribute_error
def __getattr__(self, item: str) -> Any:
return get_attribute_with_extras(self, item)
def __contains__(self, item):
return item in self.contracts
@property
def name(self) -> str:
if name := self.config.get("name"):
return name
elif name := self.manifest.name:
return name
return f"unknown-project-{random.randint(100_000, 999_999)}"
@property
def version(self) -> str:
if version := self._config_override.get("version"):
return version
elif version := self.manifest.version:
return version
else:
return "0.1.0"
@property
def project_id(self) -> str:
return f"{self.name}_{self.version}"
@property
def is_compiled(self) -> bool:
"""
True if the project is compiled at all. Does not
ensure the compilation is up-to-date.
"""
return (self._manifest.contract_types or None) is not None
def __ape_extra_attributes__(self) -> Iterator[ExtraModelAttributes]:
extras = (
ExtraModelAttributes(
name="contracts",
attributes=lambda: self.contracts,
include_getitem=True,
),
ExtraModelAttributes(
name="manifest",
attributes=lambda: self.manifest,
include_getitem=True,
include_getattr=False, # avoids contract-type confusion.
),
)
# If manifest is not compiled, don't search for contracts right
# away to delay compiling if unnecessary.
yield from extras if self.manifest.contract_types else reversed(extras)
@property
def manifest(self) -> PackageManifest:
return self._manifest
@cached_property
def dependencies(self) -> DependencyManager:
"""
Project dependencies.
"""
return DependencyManager(project=self)
@cached_property
def config(self) -> ApeConfig:
return ApeConfig.from_manifest(self.manifest, **self._config_override)
@contextmanager
def isolate_in_tempdir(self, **config_override) -> Iterator["LocalProject"]:
"""
Clone this project to a temporary directory and return
its project.
"""
config_override = config_override or {}
name = config_override.get("name", self.name)
chdir = config_override.pop("chdir", False)
with create_tempdir(name=name) as path:
if chdir:
with self.chdir(path):
yield self.unpack(path, config_override=config_override)
else:
yield self.unpack(path, config_override=config_override)
@contextmanager
def temp_config(self, **config):
existing_overrides = self._config_override or {}
self.reconfigure(**config)
yield
self.reconfigure(**existing_overrides)
def get(self, name: str) -> Optional[ContractContainer]:
return self.contracts.get(name)
def unpack(self, destination: Path, config_override: Optional[dict] = None) -> "LocalProject":
"""
Unpack the project to a location using the information
from the manifest. Converts a manifest-based project
to a local one.
"""
config_override = {**self._config_override, **(config_override or {})}
sources = self.sources or {}
# Unpack contracts.
for source_id, src in sources.items():
path = destination / source_id
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(str(src.content), encoding="utf8")
# Unpack config file.
# NOTE: Always unpacks into a regular .yaml config file for simplicity
# and maximum portability.
self.config.write_to_disk(destination / "ape-config.yaml")
return LocalProject(destination, config_override=config_override)
def update_manifest(self, **kwargs):
"""
Change manifest values. Overwrites.
Args:
**kwargs: Top-level manifest attributes.
"""
for k, v in kwargs.items():
setattr(self._manifest, k, v)
def add_compiler_data(self, compiler_data: Iterable[Compiler]) -> list[Compiler]:
"""
Add compiler data to the existing cached manifest.
Args:
compiler_data (Iterable[``ethpm_types.Compiler``]): Compilers to add.
Returns:
List[``ethpm_types.source.Compiler``]: The full list of compilers.
"""
# Validate given data.
given_compilers = set(compiler_data)
num_compilers = len([x for x in compiler_data])
if len(given_compilers) != num_compilers:
raise ProjectError(
f"`{self.add_compiler_data.__name__}()` was given multiple of the same compiler. "
"Please filter inputs."
)
# Filter out given compilers without contract types.
given_compilers = {c for c in given_compilers if c.contractTypes}
if len(given_compilers) != num_compilers:
logger.warning(
f"`{self.add_compiler_data.__name__}()` given compilers without contract types. "
"Ignoring these inputs."
)
for given_compiler in given_compilers:
other_given_compilers = [c for c in given_compilers if c != given_compiler]
contract_types_from_others = [
n for c in other_given_compilers for n in (c.contractTypes or [])
]
collisions = {
n for n in (given_compiler.contractTypes or []) if n in contract_types_from_others
}
if collisions:
collide_str = ", ".join(collisions)
raise ProjectError(f"Contract type(s) '{collide_str}' collision across compilers.")
new_types = [n for c in given_compilers for n in (c.contractTypes or [])]
# Merge given compilers with existing compilers.
existing_compilers = self.manifest.compilers or []
# Existing compilers remaining after processing new compilers.
remaining_existing_compilers: list[Compiler] = []
for existing_compiler in existing_compilers:
find_iter = iter(x for x in compiler_data if x == existing_compiler)
if matching_given_compiler := next(find_iter, None):
# Compiler already exists in the system, possibly with different contract types.
# Merge contract types.
matching_given_compiler.contractTypes = list(
{
*(existing_compiler.contractTypes or []),
*(matching_given_compiler.contractTypes or []),
}
)
# NOTE: Purposely we don't add the existing compiler back,
# as it is the same as the given compiler, (meaning same
# name, version, and settings), and we have
# merged their contract types.
continue
else:
# Filter out contract types added now under a different compiler.
existing_compiler.contractTypes = [
c for c in (existing_compiler.contractTypes or []) if c not in new_types
]
# Clear output selection for new types, since they are present in the new compiler.
if existing_compiler.settings and "outputSelection" in existing_compiler.settings:
new_src_ids = {
(self.manifest.contract_types or {})[x].source_id
for x in new_types
if x in (self.manifest.contract_types or {})
and (self.manifest.contract_types or {})[x].source_id is not None
}
existing_compiler.settings["outputSelection"] = {
k: v
for k, v in existing_compiler.settings["outputSelection"].items()
if k not in new_src_ids
}
# Remove compilers without contract types.
if existing_compiler.contractTypes:
remaining_existing_compilers.append(existing_compiler)
# Use Compiler.__hash__ to remove duplicated.
# Also, sort for consistency.
compilers = sorted(
list({*remaining_existing_compilers, *compiler_data}),
key=lambda x: f"{x.name}@{x.version}",
)
self.update_manifest(compilers=compilers)
return self._manifest.compilers or compilers # for mypy.
@property
def contracts(self) -> dict[str, ContractContainer]:
return self.load_contracts()
@property
def sources(self) -> dict[str, Source]:
return self.manifest.sources or {}
def load_contracts(
self, *source_ids: Union[str, Path], use_cache: bool = True
) -> dict[str, ContractContainer]:
result = {
ct.name: ct
for ct in ((self.manifest.contract_types or {}) if use_cache else {}).values()
if ct.name
}
compiled_source_ids = {ct.source_id for ct in result.values() if ct.source_id}
source_iter: Iterable = source_ids or list(self.manifest.sources or {})
source_iter = [f"{x}" for x in source_iter]
missing_sources = set()
for src_id in source_iter:
if src_id not in compiled_source_ids:
missing_sources.add(src_id)
missing_sources_can_compile = {
s
for s in missing_sources
if get_full_extension(Path(s)) in self.compiler_manager.registered_compilers
}
if missing_sources_can_compile:
# Attempt to compile to get missing sources.
with self.isolate_in_tempdir() as temp_project:
new_contracts = {
n: c.contract_type
for n, c in temp_project.load_contracts(*missing_sources_can_compile).items()
}
if new_contracts:
self._update_contract_types(new_contracts)
result = {**result, **new_contracts}
return {n: ContractContainer(ct) for n, ct in result.items()}
def _update_contract_types(self, contract_types: dict[str, ContractType]):
contract_types = {**(self._manifest.contract_types or {}), **contract_types}
sources = dict(self.sources.items())
self.update_manifest(contract_types=contract_types, sources=sources)
def reconfigure(self, **overrides):
"""
Change a project's config.
Args:
**overrides: Config key-value pairs. Completely overrides
existing.
"""
if "config" in self.__dict__:
# Delete cached property.
del self.__dict__["config"]
original_override = self._config_override
self._config_override = overrides
try:
_ = self.config
except Exception:
# Ensure changes don't persist.
self._config_override = original_override
raise # Whatever error it is
self._invalidate_project_dependent_caches()
def extract_manifest(self) -> PackageManifest:
# Attempt to compile, if needed.
try:
self.load_contracts()
except CompilerError as err:
# Some manifest-based projects may not require compiling,
# such as OpenZeppelin or snekmate.
logger.warning(err)
return self.manifest
def clean(self):
self._manifest.contract_types = None
self._config_override = {}
| Project |
python | django__django | tests/model_regress/models.py | {
"start": 1065,
"end": 1251
} | class ____(models.Model):
department = models.ForeignKey(Department, models.CASCADE)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
| Worker |
python | PrefectHQ__prefect | tests/runner/test_storage.py | {
"start": 788,
"end": 3119
} | class ____:
@pytest.mark.parametrize(
"url, expected_type",
[
("git://github.com/user/repo.git", "GitRepository"),
("https://github.com/user/repo.git", "GitRepository"),
],
)
def test_create_git_storage(self, url, expected_type):
storage = create_storage_from_source(url)
assert isinstance(storage, eval(expected_type))
assert storage.pull_interval == 60 # default value
@pytest.mark.parametrize(
"url, pull_interval",
[
("git://github.com/user/repo.git", 120),
("http://github.com/user/repo.git", 30),
],
)
def test_create_git_storage_custom_pull_interval(self, url, pull_interval):
storage = create_storage_from_source(url, pull_interval=pull_interval)
assert isinstance(
storage, GitRepository
) # We already know it's GitRepository from above tests
assert storage.pull_interval == pull_interval
@pytest.mark.parametrize(
"url",
[
"s3://my-bucket/path/to/folder",
"ftp://example.com/path/to/folder",
],
)
def test_alternative_storage_url(self, url):
storage = create_storage_from_source(url)
assert isinstance(storage, RemoteStorage)
assert storage._url == url
assert storage.pull_interval == 60 # default value
@pytest.mark.parametrize(
"path",
[
"/path/to/local/flows",
"C:\\path\\to\\local\\flows",
"file:///path/to/local/flows",
"flows", # Relative Path
],
)
def test_local_storage_path(self, path):
storage = create_storage_from_source(path)
path = path.split("://")[-1] # split from Scheme when present
assert isinstance(storage, LocalStorage)
assert storage._path == Path(path).resolve()
assert storage.pull_interval == 60 # default value
@pytest.fixture
def mock_run_process(monkeypatch):
mock_run_process = AsyncMock()
result_mock = MagicMock()
result_mock.stdout = "https://github.com/org/repo.git".encode()
mock_run_process.return_value = result_mock
monkeypatch.setattr("prefect.runner.storage.run_process", mock_run_process)
return mock_run_process
| TestCreateStorageFromSource |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 2861,
"end": 2940
} | class ____(RelationBase):
field_a = models.CharField(max_length=30)
| RelationA |
python | pytorch__pytorch | test/inductor/test_custom_op_autotune.py | {
"start": 732,
"end": 16961
} | class ____(TestCase):
"""Test custom operation autotuning functionality."""
def setUp(self) -> None:
"""Set up test environment with appropriate device and dtype."""
super().setUp()
self.device = "cuda" if HAS_GPU else "cpu"
self.dtype = torch.float16 if self.device == "cuda" else torch.float32
def _run_autotune_test(self, op_object, inputs, expected, test_name):
"""Shared test infrastructure for autotuning tests."""
@torch.compile
def test_model(*args):
return op_object(*args)
torch._dynamo.reset()
autotune_backends = "TRITON" if self.device == "cuda" else "ATEN"
with config.patch(
max_autotune=True,
max_autotune_gemm_backends=autotune_backends,
fx_graph_cache=False,
benchmark_kernel=True,
):
compiled_result = test_model(*inputs)
self.assertEqual(
compiled_result.shape, expected.shape, f"{test_name} shape mismatch"
)
torch.testing.assert_close(
compiled_result,
expected,
rtol=2e-1,
atol=5e-1,
msg=f"{test_name} numerical mismatch",
)
def _assert_implementations_equivalent(self, decompositions, inputs, op_name):
"""Utility to assert that all implementations produce equivalent results."""
implementations = [(func.__name__, func) for func in decompositions]
results = {}
for name, impl in implementations:
result = impl(*inputs)
results[name] = result
# Basic sanity checks
self.assertTrue(
torch.isfinite(result).all(),
f"{op_name} {name} produced non-finite values",
)
# Verify numerical equivalence
reference_name, reference_result = next(iter(results.items()))
for name, result in results.items():
if name != reference_name:
rtol = 1e-1 if "Approximated" in name else 1e-2
atol = 1e-1 if "Approximated" in name else 1e-2
torch.testing.assert_close(
result,
reference_result,
rtol=rtol,
atol=atol,
msg=f"{op_name} {name} differs from {reference_name}",
)
def _create_rmsnorm_inputs(self, batch_size=32, seq_len=2048, hidden_dim=512):
"""Create test inputs for RMSNorm operations."""
input_tensor = torch.randn(
batch_size,
seq_len,
hidden_dim,
device=self.device,
dtype=self.dtype,
requires_grad=False,
)
weight = torch.randn(
hidden_dim, device=self.device, dtype=self.dtype, requires_grad=False
)
return input_tensor, weight
def _create_mlp_inputs(
self,
batch_size=2,
seq_len=32,
hidden_dim=512,
intermediate_dim=1024,
output_dim=256,
):
"""Create test inputs for MLP operations."""
input_tensor = torch.randn(
batch_size,
seq_len,
hidden_dim,
device=self.device,
dtype=self.dtype,
requires_grad=False,
)
gate_weight = torch.randn(
hidden_dim,
intermediate_dim,
device=self.device,
dtype=self.dtype,
requires_grad=False,
)
up_weight = torch.randn(
hidden_dim,
intermediate_dim,
device=self.device,
dtype=self.dtype,
requires_grad=False,
)
down_weight = torch.randn(
intermediate_dim,
output_dim,
device=self.device,
dtype=self.dtype,
requires_grad=False,
)
return input_tensor, gate_weight, up_weight, down_weight
@skipIfXpu
def test_rmsnorm_custom_op_autotune_with_dynamic_shape(self):
"""Test RMSNorm autotuning with multiple decomposition variants and dynamic shapes.
Validates:
- Multiple decomposition implementations with different computational approaches
- Dynamic shape handling across multiple compilations
"""
test_op_name = f"test_lib::rmsnorm_{id(self)}"
def rmsnorm_decomposition1(
x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-8
) -> torch.Tensor:
"""Variance-based approach: compute variance then rsqrt."""
variance = x.pow(2).mean(dim=-1, keepdim=True)
rstd = torch.rsqrt(variance + eps)
return x * rstd * weight
def rmsnorm_decomposition2(
x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-8
) -> torch.Tensor:
"""Separate normalization and scaling: compute normalized value then scale."""
x_var = x
variance = x_var.pow(2).mean(dim=-1, keepdim=True)
x = x * torch.rsqrt(variance + eps)
x = x * weight
return x
@torch.library.custom_op(test_op_name, mutates_args=())
def test_rmsnorm_op(
input_tensor: torch.Tensor, weight: torch.Tensor, eps: float = 1e-8
) -> torch.Tensor:
return torch.nn.functional.rms_norm(
input_tensor, input_tensor.shape[-1:], weight, eps=eps
)
@test_rmsnorm_op.register_fake
def _(input_tensor: torch.Tensor, weight: torch.Tensor, eps: float = 1e-8):
return torch.empty_like(input_tensor)
decompositions = [
rmsnorm_decomposition1,
rmsnorm_decomposition2,
]
register_custom_op_autotuning(
test_rmsnorm_op,
configs=[CustomOpConfig(decomp) for decomp in decompositions],
name="test_rmsnorm_autotuned",
input_gen_fns={
"x": lambda x: torch.randn_like(x, device=self.device) * 0.02,
"weight": lambda weight: torch.ones_like(weight, device=self.device),
},
)
# Test multiple shapes to verify dynamic shape handling
test_shapes = [(2, 16, 128), (8, 32, 256)]
for i, (batch_size, seq_len, hidden_dim) in enumerate(test_shapes):
input_tensor, weight = self._create_rmsnorm_inputs(
batch_size, seq_len, hidden_dim
)
# Test numerical equivalence for all decompositions
self._assert_implementations_equivalent(
decompositions, (input_tensor, weight), f"RMSNorm_{i}"
)
# Test autotuning
expected = rmsnorm_decomposition1(input_tensor, weight)
self._run_autotune_test(
test_rmsnorm_op, (input_tensor, weight), expected, f"RMSNorm_{i}"
)
def _create_decompose_k_inputs(self, m=256, k=65536, n=1024):
"""Create test inputs for decompose_k matrix multiplication.
Tensor a: Input matrix of shape (m, k)
Tensor b: Weight matrix of shape (k, n)
Tensor bias: Bias vector of shape (n,)
"""
# Ensure k is divisible by all k_splits values: [2, 32, 64, 128, 256]
k = ((k + 255) // 256) * 256 # Round up to nearest multiple of 256
a = torch.randn(m, k, device=self.device, dtype=self.dtype, requires_grad=False)
b = torch.randn(k, n, device=self.device, dtype=self.dtype, requires_grad=False)
bias = (
torch.randn(n, device=self.device, dtype=self.dtype, requires_grad=False)
* 0.1
)
return a, b, bias
@skipIfXpu
def test_decompose_k_custom_op_autotune_dynamic_config_for_input_shape(self):
"""Test decompose_k autotuning with with epilogue fusion(matmul+bias+relu+scale) and
dynamic config generation based on matmul input shapes.
Validates that the custom op encapsulates the entire fused operation (matmul + bias
+ relu + scale) with parametric tuning for k_splits values controlling how the K
dimension is decomposed. The config generator receives correct parameter names and
shapes, dynamically generates different k_split configs using get_k_splits for
different input shapes, and produces correct results matching the reference implementation.
"""
test_op_name = f"test_lib::matmul_relu_epilogue_dynamic_{id(self)}"
def decompose_k_implementation(
a: torch.Tensor, b: torch.Tensor, k_splits: int = 4
) -> torch.Tensor:
"""Matrix multiply with k-way decomposition."""
m = a.shape[0]
n = b.shape[1]
k = a.shape[1]
k_parts = k // k_splits
B = k_splits
a_reshaped = torch.permute(
a.reshape(m, B, k_parts), (1, 0, 2)
) # [B, m, k_parts]
b_reshaped = b.reshape(B, k_parts, n) # [B, k_parts, n]
result = torch.bmm(a_reshaped, b_reshaped) # [B, m, n]
return torch.sum(result, dim=0) # [m, n]
@torch.library.custom_op(test_op_name, mutates_args=())
def matmul_relu_epilogue_dynamic_op(
a: torch.Tensor, b: torch.Tensor, bias: torch.Tensor, k_splits: int = 4
) -> torch.Tensor:
"""Matmul with decompose_k + bias + relu + scale (complete epilogue fusion)."""
matmul_result = decompose_k_implementation(a, b, k_splits)
biased = matmul_result + bias
activated = torch.relu(biased)
scaled = activated * 2.0
return scaled
@matmul_relu_epilogue_dynamic_op.register_fake
def _(a: torch.Tensor, b: torch.Tensor, bias: torch.Tensor, k_splits: int = 4):
return torch.empty(a.shape[0], b.shape[1], device=a.device, dtype=a.dtype)
# Define dynamic config generator using get_k_splits
def generate_k_split_configs(
fake_tensors: dict[str, torch.Tensor],
) -> list[CustomOpConfig]:
"""Generate k_split configs based on input matrix dimensions."""
from torch._inductor.utils import get_k_splits
m, k = fake_tensors["a"].shape[-2:]
_, n = fake_tensors["b"].shape[-2:]
k_splits_list = get_k_splits(m, n, k)
return [CustomOpConfig(k_splits=k) for k in k_splits_list]
register_custom_op_autotuning(
matmul_relu_epilogue_dynamic_op,
config_generator=generate_k_split_configs,
name="matmul_relu_epilogue_dynamic_autotuned",
input_gen_fns={
"a": lambda fake_tensor: torch.randn_like(
fake_tensor, device=self.device
)
* 0.1,
"b": lambda fake_tensor: torch.randn_like(
fake_tensor, device=self.device
)
* 0.1,
"bias": lambda fake_tensor: torch.randn_like(
fake_tensor, device=self.device
)
* 0.1,
},
)
# Test multiple shapes to verify dynamic config generation
test_shapes = [
(256, 16384, 1024),
(256, 65536, 1024),
]
for m, k, n in test_shapes:
# Use helper function to create test inputs
a, b, bias = self._create_decompose_k_inputs(m, k, n)
@torch.compile
def test_model(a, b, bias):
return matmul_relu_epilogue_dynamic_op(a, b, bias)
torch._dynamo.reset()
with config.patch(
max_autotune=True,
benchmark_fusion=True,
):
compiled_result = test_model(a, b, bias)
def reference_model(a, b, bias):
matmul_result = a @ b
biased = matmul_result + bias
activated = torch.relu(biased)
scaled = activated * 2.0
return scaled
expected = reference_model(a, b, bias)
torch.testing.assert_close(
compiled_result,
expected,
rtol=2e-1,
atol=5e-1,
msg=f"Failed for shape ({m}, {k}, {n})",
)
@skipIfXpu
def test_multi_parameter_tuning(self):
"""Test autotuning with multiple parameters for combinatorial parameter exploration.
Validates parametric tuning with multiple parameters (scale_mode and chunk_size)
to test combinatorial exploration of the parameter space.
"""
test_op_name = f"test_lib::multi_param_{id(self)}"
def multi_param_scaling(
x: torch.Tensor,
factor: torch.Tensor,
scale_mode: int = 1,
chunk_size: int = 16,
) -> torch.Tensor:
"""Different scaling approaches controlled by scale_mode parameter."""
if scale_mode == 1:
# Simple broadcasting
return x * factor
elif scale_mode == 2:
# Process in chunks
batch_size, seq_len = x.shape[:2]
chunks = []
for start in range(0, seq_len, chunk_size):
end = min(start + chunk_size, seq_len)
chunk = x[:, start:end]
chunks.append(chunk * factor)
return torch.cat(chunks, dim=1)
elif scale_mode == 3:
# Using einsum for scaling
return torch.einsum("...i,i->...i", x, factor)
@torch.library.custom_op(test_op_name, mutates_args=())
def multi_param_op(
x: torch.Tensor,
factor: torch.Tensor,
scale_mode: int = 1,
chunk_size: int = 16,
) -> torch.Tensor:
return multi_param_scaling(x, factor, scale_mode, chunk_size)
@multi_param_op.register_fake
def _(
x: torch.Tensor,
factor: torch.Tensor,
scale_mode: int = 1,
chunk_size: int = 16,
):
return torch.empty_like(x)
# Use explicit configs with scale_mode and chunk_size parameters as tuning knobs
register_custom_op_autotuning(
multi_param_op,
configs=[
CustomOpConfig(scale_mode=1), # Broadcast
CustomOpConfig(scale_mode=2, chunk_size=16), # Chunked 16
CustomOpConfig(scale_mode=2, chunk_size=32), # Chunked 32
CustomOpConfig(scale_mode=3), # Einsum
],
name="multi_param_autotuned",
input_gen_fns={
"x": lambda t: torch.randn_like(t, device=self.device) * 0.1,
"factor": lambda t: torch.ones(
t.shape[-1], device=self.device, dtype=t.dtype
),
},
)
# Create test inputs
test_x = torch.randn(4, 64, 128, device=self.device, dtype=self.dtype)
test_factor = torch.ones(128, device=self.device, dtype=self.dtype) * 2.0
# Verify numerical equivalence across all approaches
expected_result = test_x * test_factor
# Test each scale_mode variant
configs = [
(1, 16), # broadcast, chunk_size ignored
(2, 16), # chunked with size 16
(2, 32), # chunked with size 32
(3, 16), # einsum, chunk_size ignored
]
for scale_mode, chunk_size in configs:
result = multi_param_scaling(
test_x, test_factor, scale_mode=scale_mode, chunk_size=chunk_size
)
torch.testing.assert_close(
result,
expected_result,
rtol=1e-5,
atol=1e-5,
msg=f"scale_mode {scale_mode} with chunk_size {chunk_size} not equivalent to expected",
)
# Test autotuning
self._run_autotune_test(
multi_param_op, (test_x, test_factor), expected_result, "MultiParam"
)
if __name__ == "__main__":
run_tests()
| TestCustomOpAutoTune |
python | google__pytype | pytype/tests/test_super1.py | {
"start": 99,
"end": 7225
} | class ____(test_base.BaseTest):
"""Tests for super()."""
def test_set_attr(self):
self.Check("""
class Foo:
def foo(self, name, value):
super(Foo, self).__setattr__(name, value)
""")
def test_str(self):
self.Check("""
class Foo:
def foo(self, name, value):
super(Foo, self).__str__()
""")
def test_get(self):
self.Check("""
class Foo:
def foo(self, name, value):
super(Foo, self).__get__(name)
""")
def test_inherited_get(self):
self.Check("""
class Foo:
def __get__(self, obj, objtype):
return 42
class Bar(Foo):
def __get__(self, obj, objtype):
return super(Bar, self).__get__(obj, objtype)
class Baz:
x = Bar()
Baz().x + 1
""")
def test_inherited_get_grandparent(self):
self.Check("""
class Foo:
def __get__(self, obj, objtype):
return 42
class Mid(Foo):
pass
class Bar(Mid):
def __get__(self, obj, objtype):
return super(Bar, self).__get__(obj, objtype)
class Baz:
x = Bar()
Baz().x + 1
""")
def test_inherited_get_multiple(self):
self.Check("""
class Foo:
def __get__(self, obj, objtype):
return 42
class Quux:
pass
class Bar(Quux, Foo):
def __get__(self, obj, objtype):
return super(Bar, self).__get__(obj, objtype)
class Baz:
x = Bar()
Baz().x + 1
""")
def test_set(self):
errors = self.CheckWithErrors("""
class Foo:
def foo(self, name, value):
super(Foo, self).__set__(name, value) # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"__set__.*super"})
def test_inherited_set(self):
self.Check("""
class Foo:
def __init__(self):
self.foo = 1
def __set__(self, name, value):
self.foo = value
class Bar(Foo):
def __set__(self, name, value):
super(Bar, self).__set__(name, value)
class Baz():
x = Bar()
y = Baz()
y.x = 42
""")
def test_init(self):
self.Check("""
class Foo:
def foo(self, name, value):
super(Foo, self).__init__()
""")
def test_getattr(self):
self.Check("""
class Foo:
def hello(self, name):
getattr(super(Foo, self), name)
""")
def test_getattr_multiple_inheritance(self):
self.Check("""
class X:
pass
class Y:
bla = 123
class Foo(X, Y):
def hello(self):
getattr(super(Foo, self), "bla")
""")
def test_getattr_inheritance(self):
self.Check("""
class Y:
bla = 123
class Foo(Y):
def hello(self):
getattr(super(Foo, self), "bla")
""")
def test_isinstance(self):
self.Check("""
class Y:
pass
class Foo(Y):
def hello(self):
return isinstance(super(Foo, self), Y)
""")
def test_call_super(self):
errorlog = self.CheckWithErrors("""
class Y:
pass
class Foo(Y):
def hello(self):
return super(Foo, self)() # not-callable[e]
""")
self.assertErrorRegexes(errorlog, {"e": r"super"})
def test_super_type(self):
ty = self.Infer("""
class A:
pass
x = super(type, A)
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
pass
x = ... # type: super
""",
)
def test_super_with_ambiguous_base(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Grandparent:
def f(self) -> int: ...
""",
)
ty = self.Infer(
"""
import foo
class Parent(foo.Grandparent):
pass
OtherParent = __any_object__
class Child(OtherParent, Parent):
def f(self):
return super(Parent, self).f()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
class Parent(foo.Grandparent): ...
OtherParent = ... # type: Any
class Child(Any, Parent):
def f(self) -> int: ...
""",
)
def test_super_with_any(self):
self.Check("""
super(__any_object__, __any_object__)
""")
def test_single_argument_super(self):
errors = self.CheckWithErrors("""
super(object)
super(object()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"cls: type.*cls: object"})
def test_method_on_single_argument_super(self):
ty, errors = self.InferWithErrors("""
sup = super(object)
sup.foo # attribute-error[e1]
sup.__new__(object) # wrong-arg-types[e2]
v = sup.__new__(super)
""")
self.assertTypesMatchPytd(
ty,
"""
sup = ... # type: super
v = ... # type: super
""",
)
self.assertErrorRegexes(
errors,
{"e1": r"'foo' on super", "e2": r"type\[super\].*type\[object\]"},
)
def test_super_under_decorator(self):
self.Check("""
def decorate(cls):
return __any_object__
class Parent:
def Hello(self):
pass
@decorate
class Child(Parent):
def Hello(self):
return super(Child, self).Hello()
""")
def test_super_set_attr(self):
errors = self.CheckWithErrors("""
class Foo:
def __init__(self):
super(Foo, self).foo = 42 # not-writable[e]
""")
self.assertErrorRegexes(errors, {"e": r"super"})
def test_super_subclass_set_attr(self):
errors = self.CheckWithErrors("""
class Foo: pass
class Bar(Foo):
def __init__(self):
super(Bar, self).foo = 42 # not-writable[e]
""")
self.assertErrorRegexes(errors, {"e": r"super"})
def test_super_nothing_set_attr(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo(nothing): ...
""",
)
_, errors = self.InferWithErrors(
"""
import foo
class Bar(foo.Foo):
def __init__(self):
super(foo.Foo, self).foo = 42 # not-writable[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"super"})
def test_super_any_set_attr(self):
errors = self.CheckWithErrors("""
class Foo(__any_object__):
def __init__(self):
super(Foo, self).foo = 42 # not-writable[e]
""")
self.assertErrorRegexes(errors, {"e": r"super"})
@test_base.skip("pytype thinks the two Foo classes are the same")
def test_duplicate_class_names(self):
self.Check("""
class Foo:
def __new__(self, *args, **kwargs):
typ = type('Foo', (Foo,), {})
return super(Foo, typ).__new__(typ)
def __init__(self, x):
super(Foo, self).__init__()
""")
if __name__ == "__main__":
test_base.main()
| SuperTest |
python | google__pytype | pytype/tests/test_operators1.py | {
"start": 8062,
"end": 10923
} | class ____(test_base.BaseTest, test_utils.OperatorsTestMixin):
"""Tests for reverse operators."""
def test_add(self):
self.check_reverse("add", "+")
def test_and(self):
self.check_reverse("and", "&")
def test_floordiv(self):
self.check_reverse("floordiv", "//")
def test_lshift(self):
self.check_reverse("lshift", "<<")
def test_rshift(self):
self.check_reverse("rshift", ">>")
def test_mod(self):
self.check_reverse("mod", "%")
def test_mul(self):
self.check_reverse("mul", "*")
def test_or(self):
self.check_reverse("or", "|")
def test_pow(self):
self.check_reverse("pow", "**")
def test_sub(self):
self.check_reverse("sub", "-")
def test_custom(self):
with test_utils.Tempdir() as d:
d.create_file(
"test.pyi",
"""
from typing import Tuple
class Test():
def __or__(self, other: Tuple[int, ...]) -> bool: ...
def __ror__(self, other: Tuple[int, ...]) -> bool: ...
""",
)
ty = self.Infer(
"""
import test
x = test.Test() | (1, 2)
y = (1, 2) | test.Test()
def f(t):
return t | (1, 2)
def g(t):
return (1, 2) | t
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import test
from typing import Any
x = ... # type: bool
y = ... # type: bool
def f(t) -> Any: ...
def g(t) -> Any: ...
""",
)
def test_custom_reverse_unused(self):
self.Check("""
class Foo:
def __sub__(self, other):
return 42
def __rsub__(self, other):
return ""
(Foo() - Foo()).real
""")
def test_inherited_custom_reverse_unused(self):
self.Check("""
class Foo:
def __sub__(self, other):
return 42
def __rsub__(self, other):
return ""
class Bar(Foo):
pass
(Foo() - Bar()).real
""")
def test_custom_reverse_only(self):
self.Check("""
class Foo:
def __sub__(self, other):
return ""
class Bar(Foo):
def __rsub__(self, other):
return 42
(Foo() - Bar()).real
""")
def test_unknown_left(self):
self.Check("""
class Foo:
def __rsub__(self, other):
return ""
(__any_object__ - Foo()).real
""")
def test_unknown_right(self):
# Reverse operators are rare enough that it makes sense to assume that the
# regular operator was called when the right side is ambiguous.
errors = self.CheckWithErrors("""
class Foo:
def __sub__(self, other):
return ""
(Foo() - __any_object__).real # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"real.*str"})
| ReverseTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1033261,
"end": 1034063
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateOrganizationAllowPrivateRepositoryForkingSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "message", "organization")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the allow private
repository forking setting.
"""
organization = sgqlc.types.Field("Organization", graphql_name="organization")
"""The organization with the updated allow private repository forking
setting.
"""
| UpdateOrganizationAllowPrivateRepositoryForkingSettingPayload |
python | chardet__chardet | chardet/gb2312prober.py | {
"start": 1297,
"end": 1693
} | class ____(MultiByteCharSetProber):
def __init__(self) -> None:
super().__init__()
self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
self.distribution_analyzer = GB2312DistributionAnalysis()
self.reset()
@property
def charset_name(self) -> str:
return "GB2312"
@property
def language(self) -> str:
return "Chinese"
| GB2312Prober |
python | Netflix__metaflow | metaflow/system/system_utils.py | {
"start": 27,
"end": 657
} | class ____(object):
def __init__(self, name="not_a_real_flow"):
self.name = name
# This function is used to initialize the environment outside a flow.
def init_environment_outside_flow(
flow: Union["metaflow.flowspec.FlowSpec", "metaflow.sidecar.DummyFlow"]
) -> "metaflow.metaflow_environment.MetaflowEnvironment":
from metaflow.plugins import ENVIRONMENTS
from metaflow.metaflow_config import DEFAULT_ENVIRONMENT
from metaflow.metaflow_environment import MetaflowEnvironment
return [
e for e in ENVIRONMENTS + [MetaflowEnvironment] if e.TYPE == DEFAULT_ENVIRONMENT
][0](flow)
| DummyFlow |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.