language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
streamlit__streamlit
|
lib/streamlit/web/server/server.py
|
{
"start": 6021,
"end": 10396
}
|
class ____(Exception):
pass
def server_port_is_manually_set() -> bool:
return config.is_manually_set("server.port")
def server_address_is_unix_socket() -> bool:
address = config.get_option("server.address")
return address is not None and address.startswith(UNIX_SOCKET_PREFIX)
def start_listening(app: tornado.web.Application) -> None:
"""Makes the server start listening at the configured port.
In case the port is already taken it tries listening to the next available
port. It will error after MAX_PORT_SEARCH_RETRIES attempts.
"""
cert_file = config.get_option("server.sslCertFile")
key_file = config.get_option("server.sslKeyFile")
ssl_options = _get_ssl_options(cert_file, key_file)
http_server = HTTPServer(
app,
max_buffer_size=config.get_option("server.maxUploadSize") * 1024 * 1024,
ssl_options=ssl_options,
)
if server_address_is_unix_socket():
start_listening_unix_socket(http_server)
else:
start_listening_tcp_socket(http_server)
def _get_ssl_options(cert_file: str | None, key_file: str | None) -> SSLContext | None:
if bool(cert_file) != bool(key_file):
_LOGGER.error(
"Options 'server.sslCertFile' and 'server.sslKeyFile' must "
"be set together. Set missing options or delete existing options."
)
sys.exit(1)
if cert_file and key_file:
# ssl_ctx.load_cert_chain raise exception as below, but it is not
# sufficiently user-friendly
# FileNotFoundError: [Errno 2] No such file or directory
if not Path(cert_file).exists():
_LOGGER.error("Cert file '%s' does not exist.", cert_file)
sys.exit(1)
if not Path(key_file).exists():
_LOGGER.error("Key file '%s' does not exist.", key_file)
sys.exit(1)
import ssl
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
# When the SSL certificate fails to load, an exception is raised as below,
# but it is not sufficiently user-friendly.
# ssl.SSLError: [SSL] PEM lib (_ssl.c:4067)
try:
ssl_ctx.load_cert_chain(cert_file, key_file)
except ssl.SSLError:
_LOGGER.exception(
"Failed to load SSL certificate. Make sure "
"cert file '%s' and key file '%s' are correct.",
cert_file,
key_file,
)
sys.exit(1)
return ssl_ctx
return None
def start_listening_unix_socket(http_server: HTTPServer) -> None:
address = config.get_option("server.address")
file_name = os.path.expanduser(address[len(UNIX_SOCKET_PREFIX) :])
import tornado.netutil
if hasattr(tornado.netutil, "bind_unix_socket"):
unix_socket = tornado.netutil.bind_unix_socket(file_name)
http_server.add_socket(unix_socket)
else:
_LOGGER.error(
"Unix socket support is not available in this version of Tornado."
)
sys.exit(1)
def start_listening_tcp_socket(http_server: HTTPServer) -> None:
call_count = 0
port = None
while call_count < MAX_PORT_SEARCH_RETRIES:
address = config.get_option("server.address")
port = config.get_option("server.port")
try:
http_server.listen(port, address)
break # It worked! So let's break out of the loop.
except OSError as e:
if e.errno == errno.EADDRINUSE:
if server_port_is_manually_set():
_LOGGER.error("Port %s is already in use", port) # noqa: TRY400
sys.exit(1)
else:
_LOGGER.debug(
"Port %s already in use, trying to use the next one.", port
)
port += 1
config.set_option(
"server.port", port, ConfigOption.STREAMLIT_DEFINITION
)
call_count += 1
else:
raise
if call_count >= MAX_PORT_SEARCH_RETRIES:
raise RetriesExceededError(
f"Cannot start Streamlit server. Port {port} is already in use, and "
f"Streamlit was unable to find a free port after {MAX_PORT_SEARCH_RETRIES} attempts.",
)
|
RetriesExceededError
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/distribute_coordinator.py
|
{
"start": 1289,
"end": 1497
}
|
class ____(object):
PS = "ps"
WORKER = "worker"
CHIEF = "chief"
EVALUATOR = "evaluator"
CLIENT = "client"
# TODO(yuefengz): support another mode where the client colocates with one
# worker.
|
_TaskType
|
python
|
Pylons__pyramid
|
tests/test_predicates.py
|
{
"start": 17115,
"end": 17327
}
|
class ____:
def __init__(self, result):
self.result = result
def text(self):
return self.result
phash = text
def __call__(self, context, request):
return True
|
DummyPredicate
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/django/toystore/forms.py
|
{
"start": 7116,
"end": 7296
}
|
class ____(ReprModelForm):
company = forms.ModelChoiceField(queryset=Company.objects.order_by("name"))
class Meta:
model = Store
fields = "__all__"
|
StoreForm
|
python
|
spyder-ide__spyder
|
spyder/plugins/editor/tests/test_editor_config_dialog.py
|
{
"start": 554,
"end": 1186
}
|
class ____(QMainWindow):
register_shortcut = Mock()
file_menu_actions = []
file_toolbar_actions = []
statusbar = Mock()
new_instance = Mock()
plugin_focus_changed = Mock()
fallback_completions = Mock()
ipyconsole = Mock()
mainmenu = Mock()
sig_setup_finished = Mock()
switcher = Mock()
@pytest.mark.parametrize(
'config_dialog',
# [[MainWindowMock, [ConfigPlugins], [Plugins]]]
[[MainWindowMock, [], [Editor]]],
indirect=True)
def test_config_dialog(config_dialog):
configpage = config_dialog.get_page()
assert configpage
configpage.save_to_conf()
|
MainWindowMock
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/implementation/fetch_runs.py
|
{
"start": 14075,
"end": 27318
}
|
class ____:
"""Three part cursor for paginating the Runs Feed. The run_cursor is the run_id of the oldest run that
has been returned. The backfill_cursor is the id of the oldest backfill that has been returned. The
timestamp is the timestamp of the oldest entry (run or backfill).
If the run/backfill cursor is None, that means that no runs/backfills have been returned yet and querying
should begin at the start of the table. Once all runs/backfills in the table have been returned, the
corresponding cursor should still be set to the id of the last run/backfill returned.
The timestamp is used for the following case. If a deployment has 20 runs and 0 backfills, and a query is
made for 10 Runs Feed entries, the first 10 runs will be returned. At this time, the run_cursor will be an id,
and the backfill_cursor will be None. Then a backfill is created. If a second query is made for 10 Runs Feed entries
the newly created backfill will get included in the list, even though it should be included on the first page by time
order. To prevent this, the timestamp is used to ensure that all returned entires are older than the entries on the
previous page.
"""
run_cursor: Optional[str]
backfill_cursor: Optional[str]
timestamp: Optional[float]
def to_string(self) -> str:
return f"{self.run_cursor if self.run_cursor else ''}{_DELIMITER}{self.backfill_cursor if self.backfill_cursor else ''}{_DELIMITER}{self.timestamp if self.timestamp else ''}"
@staticmethod
def from_string(serialized: Optional[str]):
if serialized is None:
return RunsFeedCursor(
run_cursor=None,
backfill_cursor=None,
timestamp=None,
)
parts = serialized.split(_DELIMITER)
if len(parts) != 3:
raise DagsterInvariantViolationError(f"Invalid cursor for querying runs: {serialized}")
return RunsFeedCursor(
run_cursor=parts[0] if parts[0] else None,
backfill_cursor=parts[1] if parts[1] else None,
timestamp=float(parts[2]) if parts[2] else None,
)
def _fetch_runs_not_in_backfill(
instance: DagsterInstance,
cursor: Optional[str],
limit: int,
filters: Optional[RunsFilter],
) -> Sequence[RunRecord]:
"""Fetches limit RunRecords that are not part of a backfill and were created before a given timestamp."""
runs = []
while len(runs) < limit:
# fetch runs in a loop and discard runs that are part of a backfill until we have
# limit runs to return or have reached the end of the runs table
new_runs = instance.get_run_records(limit=limit, cursor=cursor, filters=filters)
if len(new_runs) == 0:
return runs
cursor = new_runs[-1].dagster_run.run_id
runs.extend([run for run in new_runs if run.dagster_run.tags.get(BACKFILL_ID_TAG) is None])
return runs[:limit]
RUN_STATUS_TO_BULK_ACTION_STATUSES = {
DagsterRunStatus.SUCCESS: [BulkActionStatus.COMPLETED_SUCCESS],
DagsterRunStatus.FAILURE: [BulkActionStatus.FAILED, BulkActionStatus.COMPLETED_FAILED],
DagsterRunStatus.CANCELED: [BulkActionStatus.CANCELED],
DagsterRunStatus.CANCELING: [BulkActionStatus.CANCELING],
DagsterRunStatus.STARTED: [BulkActionStatus.REQUESTED],
}
def _bulk_action_statuses_from_run_statuses(
statuses: Sequence[DagsterRunStatus],
) -> Sequence[BulkActionStatus]:
full_list = []
for status in statuses:
full_list.extend(RUN_STATUS_TO_BULK_ACTION_STATUSES.get(status, []))
return full_list
def _filters_apply_to_backfills(filters: RunsFilter) -> bool:
# the following filters do not apply to backfills, so skip fetching backfills if they are set
if (
(filters.run_ids is not None and len(filters.run_ids) > 0)
or filters.updated_after is not None
or filters.updated_before is not None
or filters.snapshot_id is not None
):
return False
# if filtering by statuses and all are not valid backfill statuses, skip fetching backfills
if filters.statuses and len(_bulk_action_statuses_from_run_statuses(filters.statuses)) == 0:
return False
return True
def _bulk_action_filters_from_run_filters(filters: RunsFilter) -> BulkActionsFilter:
converted_statuses = (
_bulk_action_statuses_from_run_statuses(filters.statuses) if filters.statuses else None
)
backfill_ids = None
if filters.tags.get(BACKFILL_ID_TAG) is not None:
backfill_ids = filters.tags[BACKFILL_ID_TAG]
if isinstance(backfill_ids, str):
backfill_ids = [backfill_ids]
tags = (
{key: value for key, value in filters.tags.items() if key != BACKFILL_ID_TAG}
if filters.tags
else None
)
return BulkActionsFilter(
created_before=filters.created_before,
created_after=filters.created_after,
statuses=converted_statuses,
job_name=filters.job_name,
tags=tags,
backfill_ids=backfill_ids,
)
def _replace_created_before_with_cursor(
filters: RunsFilter, created_before_cursor: Optional[datetime.datetime]
):
"""After the first page of results is returned, created_before_cursor will be less than
filters.created_before. For pagination of results to work, we need to ensure that the
created_before filter is set to the minimum of created_before_cursor and filters.created_before.
"""
if filters.created_before and created_before_cursor:
created_before = min(created_before_cursor, filters.created_before)
elif created_before_cursor:
created_before = created_before_cursor
elif filters.created_before:
created_before = filters.created_before
else: # no created_before should be applied, return filters as is
return filters
return copy(filters, created_before=created_before)
def get_runs_feed_entries(
graphene_info: "ResolveInfo",
limit: int,
filters: Optional[RunsFilter],
view: "GrapheneRunsFeedView",
cursor: Optional[str] = None,
) -> "GrapheneRunsFeedConnection":
"""Returns a GrapheneRunsFeedConnection, which contains a merged list of backfills and
single runs (runs that are not part of a backfill), the cursor to fetch the next page,
and a boolean indicating if there are more results to fetch.
Args:
limit (int): max number of results to return
cursor (Optional[str]): String that can be deserialized into a RunsFeedCursor. If None, indicates
that querying should start at the beginning of the table for both runs and backfills.
filters (Optional[RunsFilter]): Filters to apply to the runs. If None, no filters are applied.
view (RunsFeedView): If True, include runs that are part of a backfill in the results and exclude backfill objects
"""
from dagster_graphql.schema.backfill import GraphenePartitionBackfill
from dagster_graphql.schema.pipelines.pipeline import GrapheneRun
from dagster_graphql.schema.runs_feed import GrapheneRunsFeedConnection, GrapheneRunsFeedView
check.opt_str_param(cursor, "cursor")
check.int_param(limit, "limit")
check.opt_inst_param(filters, "filters", RunsFilter)
instance = graphene_info.context.instance
runs_feed_cursor = RunsFeedCursor.from_string(cursor)
# In the default "ROOTS" run feed, we exclude runs that are part of backfills. If
# the user chooses the "RUNS" view, we want to flatten backfills into their runs.
exclude_subruns = view == GrapheneRunsFeedView.ROOTS
# if using limit, fetch limit+1 of each type to know if there are more than limit remaining
fetch_limit = limit + 1
# filter out any backfills/runs that are newer than the cursor timestamp. See RunsFeedCursor docstring
# for case when this is necessary
created_before_cursor = (
datetime_from_timestamp(runs_feed_cursor.timestamp) if runs_feed_cursor.timestamp else None
)
should_fetch_backfills = (
view == GrapheneRunsFeedView.ROOTS or view == GrapheneRunsFeedView.BACKFILLS
) and (_filters_apply_to_backfills(filters) if filters else True)
if filters:
check.invariant(
filters.exclude_subruns is None,
"filters.exclude_subruns must be None when fetching the runs feed. Use include_runs_from_backfills instead.",
)
with disable_dagster_warnings():
run_filters = copy(filters, exclude_subruns=exclude_subruns)
run_filters = _replace_created_before_with_cursor(run_filters, created_before_cursor)
backfill_filters = (
_bulk_action_filters_from_run_filters(run_filters) if should_fetch_backfills else None
)
else:
with disable_dagster_warnings():
run_filters = RunsFilter(
created_before=created_before_cursor, exclude_subruns=exclude_subruns
)
backfill_filters = BulkActionsFilter(created_before=created_before_cursor)
if should_fetch_backfills:
backfills = [
GraphenePartitionBackfill(backfill)
for backfill in instance.get_backfills(
cursor=runs_feed_cursor.backfill_cursor,
limit=fetch_limit,
filters=backfill_filters,
)
]
else:
backfills = []
# if we are not showing runs within backfills and the backfill_id filter is set, we know
# there will be no results, so we can skip fetching runs
should_fetch_runs = (
view == GrapheneRunsFeedView.ROOTS or view == GrapheneRunsFeedView.RUNS
) and not (exclude_subruns and run_filters.tags.get(BACKFILL_ID_TAG) is not None)
if should_fetch_runs:
runs = [
GrapheneRun(run)
for run in instance.get_run_records(
limit=fetch_limit, cursor=runs_feed_cursor.run_cursor, filters=run_filters
)
]
else:
runs = []
# if we fetched limit+1 of either runs or backfills, we know there must be more results
# to fetch on the next call since we will return limit results for this call. Additionally,
# if we fetched more than limit of runs and backfill combined, we know there are more results
has_more = (
len(backfills) == fetch_limit
or len(runs) == fetch_limit
or len(backfills) + len(runs) > limit
)
all_entries = backfills + runs
# order runs and backfills by create_time. typically we sort by storage id but that won't work here since
# they are different tables
all_entries = sorted(
all_entries,
key=lambda x: x.creation_timestamp,
reverse=True,
)
to_return = all_entries[:limit]
new_run_cursor = None
new_backfill_cursor = None
for entry in reversed(to_return):
if new_run_cursor is not None and new_backfill_cursor is not None:
break
if new_backfill_cursor is None and isinstance(entry, GraphenePartitionBackfill):
new_backfill_cursor = entry.id
if new_run_cursor is None and isinstance(entry, GrapheneRun):
new_run_cursor = entry.runId
new_timestamp = to_return[-1].creation_timestamp if to_return else None
# if either of the new cursors are None, replace with the cursor passed in so the next call doesn't
# restart at the top the table.
final_cursor = RunsFeedCursor(
run_cursor=new_run_cursor if new_run_cursor else runs_feed_cursor.run_cursor,
backfill_cursor=new_backfill_cursor
if new_backfill_cursor
else runs_feed_cursor.backfill_cursor,
timestamp=new_timestamp if new_timestamp else runs_feed_cursor.timestamp,
)
return GrapheneRunsFeedConnection(
results=to_return, cursor=final_cursor.to_string(), hasMore=has_more
)
def get_runs_feed_count(
graphene_info: "ResolveInfo", filters: Optional[RunsFilter], view: "GrapheneRunsFeedView"
) -> int:
from dagster_graphql.schema.runs_feed import GrapheneRunsFeedView
# In the default "ROOTS" run feed, we exclude runs that are part of backfills. If
# the user chooses the "RUNS" view, we want to flatten backfills into their runs.
exclude_subruns = view == GrapheneRunsFeedView.ROOTS
should_fetch_runs = view == GrapheneRunsFeedView.RUNS or view == GrapheneRunsFeedView.ROOTS
should_fetch_backfills = (
view == GrapheneRunsFeedView.BACKFILLS or view == GrapheneRunsFeedView.ROOTS
) and (_filters_apply_to_backfills(filters) if filters else True)
with disable_dagster_warnings():
run_filters = (
copy(filters, exclude_subruns=exclude_subruns)
if filters
else RunsFilter(exclude_subruns=exclude_subruns)
)
if should_fetch_backfills:
backfill_filters = _bulk_action_filters_from_run_filters(run_filters)
backfills_count = graphene_info.context.instance.get_backfills_count(backfill_filters)
else:
backfills_count = 0
if should_fetch_runs:
runs_count = graphene_info.context.instance.get_runs_count(run_filters)
else:
runs_count = 0
return runs_count + backfills_count
|
RunsFeedCursor
|
python
|
pypa__twine
|
tests/test_auth.py
|
{
"start": 9561,
"end": 9977
}
|
class ____:
def __init__(self, status_code: int, json: t.Any) -> None:
self.status_code = status_code
self._json = json
def json(self, *args, **kwargs) -> t.Any:
return self._json
def raise_for_status(self) -> None:
if 400 <= self.status_code:
raise requests.exceptions.HTTPError()
def ok(self) -> bool:
return self.status_code == 200
|
MockResponse
|
python
|
huggingface__transformers
|
src/transformers/models/siglip/modeling_siglip.py
|
{
"start": 16023,
"end": 18872
}
|
class ____(PreTrainedModel):
config: SiglipConfig
base_model_prefix = "siglip"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = [
"SiglipTextEmbeddings",
"SiglipVisionEmbeddings",
"SiglipEncoderLayer",
"SiglipMultiheadAttentionPoolingHead",
]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": SiglipEncoderLayer,
"attentions": SiglipAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, SiglipVisionEmbeddings):
width = (
self.config.vision_config.hidden_size
if isinstance(self.config, SiglipConfig)
else self.config.hidden_size
)
init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
elif isinstance(module, nn.Embedding):
default_flax_embed_init(module.weight)
elif isinstance(module, SiglipAttention):
init.xavier_uniform_(module.q_proj.weight)
init.xavier_uniform_(module.k_proj.weight)
init.xavier_uniform_(module.v_proj.weight)
init.xavier_uniform_(module.out_proj.weight)
init.zeros_(module.q_proj.bias)
init.zeros_(module.k_proj.bias)
init.zeros_(module.v_proj.bias)
init.zeros_(module.out_proj.bias)
elif isinstance(module, SiglipMLP):
init.xavier_uniform_(module.fc1.weight)
init.xavier_uniform_(module.fc2.weight)
init.normal_(module.fc1.bias, std=1e-6)
init.normal_(module.fc2.bias, std=1e-6)
elif isinstance(module, SiglipMultiheadAttentionPoolingHead):
init.xavier_uniform_(module.probe)
init.xavier_uniform_(module.attention.in_proj_weight)
init.zeros_(module.attention.in_proj_bias)
elif isinstance(module, SiglipModel):
init.zeros_(module.logit_scale)
init.zeros_(module.logit_bias)
elif isinstance(module, SiglipForImageClassification):
init.normal_(
module.classifier.weight,
std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, (nn.Linear, nn.Conv2d)):
lecun_normal_(module.weight)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->Siglip
|
SiglipPreTrainedModel
|
python
|
getsentry__sentry
|
src/sentry/release_health/base.py
|
{
"start": 2471,
"end": 3089
}
|
class ____(TypedDict):
start: datetime
end: datetime
intervals: list[DateString]
groups: list[SessionsQueryGroup]
query: str
FormattedIsoTime = str
ProjectRelease = tuple[ProjectId, ReleaseName]
ProjectOrRelease = TypeVar("ProjectOrRelease", ProjectId, ProjectRelease)
# taken from sentry.snuba.sessions.STATS_PERIODS
StatsPeriod = Literal[
"1h",
"24h",
"1d",
"48h",
"2d",
"7d",
"14d",
"30d",
"90d",
]
OverviewStat = Literal["users", "sessions"]
def is_overview_stat(s: str) -> TypeIs[OverviewStat]:
return s in ("users", "sessions")
|
SessionsQueryResult
|
python
|
facebookresearch__faiss
|
tests/test_io.py
|
{
"start": 6372,
"end": 7125
}
|
class ____:
""" wraps an OnDisk object for use from C++ """
def __init__(self, oil):
self.oil = oil
def list_size(self, list_no):
return self.oil.list_size(list_no)
def get_codes(self, list_no):
oil = self.oil
assert 0 <= list_no < oil.lists.size()
l = oil.lists.at(list_no)
with open(oil.filename, 'rb') as f:
f.seek(l.offset)
return f.read(l.size * oil.code_size)
def get_ids(self, list_no):
oil = self.oil
assert 0 <= list_no < oil.lists.size()
l = oil.lists.at(list_no)
with open(oil.filename, 'rb') as f:
f.seek(l.offset + l.capacity * oil.code_size)
return f.read(l.size * 8)
|
PyOndiskInvertedLists
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 61349,
"end": 61441
}
|
class ____(_ConfigBase):
cache: Optional[bool]
rescore_limit: int
@dataclass
|
_BQConfig
|
python
|
ansible__ansible
|
test/integration/targets/ansible-test-sanity-pylint/ansible_collections/ns/col/plugins/lookup/deprecated.py
|
{
"start": 3924,
"end": 4025
}
|
class ____:
def __init__(self, thing) -> None:
self.module: AnsibleModule = thing
|
MyWrapper
|
python
|
coleifer__peewee
|
tests/postgres.py
|
{
"start": 31939,
"end": 32467
}
|
class ____(DatabaseTestCase):
database = db_loader('postgres', isolation_level=3) # SERIALIZABLE.
def test_isolation_level(self):
conn = self.database.connection()
self.assertEqual(conn.isolation_level, 3)
conn.set_isolation_level(2)
self.assertEqual(conn.isolation_level, 2)
self.database.close()
conn = self.database.connection()
self.assertEqual(conn.isolation_level, 3)
@skip_unless(pg12(), 'cte materialization requires pg >= 12')
|
TestPostgresIsolationLevel
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/methods/test_isetitem.py
|
{
"start": 96,
"end": 1428
}
|
class ____:
def test_isetitem_ea_df(self):
# GH#49922
df = DataFrame([[1, 2, 3], [4, 5, 6]])
rhs = DataFrame([[11, 12], [13, 14]], dtype="Int64")
df.isetitem([0, 1], rhs)
expected = DataFrame(
{
0: Series([11, 13], dtype="Int64"),
1: Series([12, 14], dtype="Int64"),
2: [3, 6],
}
)
tm.assert_frame_equal(df, expected)
def test_isetitem_ea_df_scalar_indexer(self):
# GH#49922
df = DataFrame([[1, 2, 3], [4, 5, 6]])
rhs = DataFrame([[11], [13]], dtype="Int64")
df.isetitem(2, rhs)
expected = DataFrame(
{
0: [1, 4],
1: [2, 5],
2: Series([11, 13], dtype="Int64"),
}
)
tm.assert_frame_equal(df, expected)
def test_isetitem_dimension_mismatch(self):
# GH#51701
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
value = df.copy()
with pytest.raises(ValueError, match="Got 2 positions but value has 3 columns"):
df.isetitem([1, 2], value)
value = df.copy()
with pytest.raises(ValueError, match="Got 2 positions but value has 1 columns"):
df.isetitem([1, 2], value[["a"]])
|
TestDataFrameSetItem
|
python
|
django__django
|
tests/contenttypes_tests/models.py
|
{
"start": 370,
"end": 517
}
|
class ____(models.Model):
name = models.CharField(max_length=100)
def get_absolute_url(self):
return "/authors/%s/" % self.id
|
Author
|
python
|
apache__airflow
|
airflow-core/tests/unit/always/test_secrets_backends.py
|
{
"start": 1704,
"end": 4172
}
|
class ____:
def setup_method(self) -> None:
clear_db_connections()
clear_db_variables()
def teardown_method(self) -> None:
clear_db_connections()
clear_db_variables()
@pytest.mark.parametrize(
("kwargs", "output"),
[
({"path_prefix": "PREFIX", "secret_id": "ID"}, "PREFIX/ID"),
({"path_prefix": "PREFIX", "secret_id": "ID", "sep": "-"}, "PREFIX-ID"),
],
ids=["default", "with_sep"],
)
def test_build_path(self, kwargs, output):
build_path = BaseSecretsBackend.build_path
assert build_path(**kwargs) == output
def test_connection_env_secrets_backend(self):
sample_conn_1 = SampleConn("sample_1", "A")
env_secrets_backend = EnvironmentVariablesBackend()
os.environ[sample_conn_1.var_name] = sample_conn_1.conn_uri
conn = env_secrets_backend.get_connection(sample_conn_1.conn_id)
# we could make this more precise by defining __eq__ method for Connection
assert sample_conn_1.host.lower() == conn.host
def test_connection_metastore_secrets_backend(self):
sample_conn_2 = SampleConn("sample_2", "A")
with create_session() as session:
session.add(sample_conn_2.conn)
session.commit()
metastore_backend = MetastoreBackend()
conn = metastore_backend.get_connection("sample_2")
assert sample_conn_2.host.lower() == conn.host
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_VAR_HELLO": "World",
"AIRFLOW_VAR_EMPTY_STR": "",
},
)
def test_variable_env_secrets_backend(self):
env_secrets_backend = EnvironmentVariablesBackend()
variable_value = env_secrets_backend.get_variable(key="hello")
assert variable_value == "World"
assert env_secrets_backend.get_variable(key="non_existent_key") is None
assert env_secrets_backend.get_variable(key="empty_str") == ""
def test_variable_metastore_secrets_backend(self):
Variable.set(key="hello", value="World")
Variable.set(key="empty_str", value="")
metastore_backend = MetastoreBackend()
variable_value = metastore_backend.get_variable(key="hello")
assert variable_value == "World"
assert metastore_backend.get_variable(key="non_existent_key") is None
assert metastore_backend.get_variable(key="empty_str") == ""
|
TestBaseSecretsBackend
|
python
|
ray-project__ray
|
python/ray/serve/_private/request_router/replica_wrapper.py
|
{
"start": 1432,
"end": 3496
}
|
class ____(ReplicaWrapper):
def __init__(self, actor_handle):
self._actor_handle = actor_handle
def send_request_java(self, pr: PendingRequest) -> ActorReplicaResult:
"""Send the request to a Java replica.
Does not currently support streaming.
"""
if pr.metadata.is_streaming:
raise RuntimeError("Streaming not supported for Java.")
if len(pr.args) != 1:
raise ValueError("Java handle calls only support a single argument.")
return ActorReplicaResult(
self._actor_handle.handle_request.remote(
RequestMetadataProto(
request_id=pr.metadata.request_id,
# Default call method in java is "call," not "__call__" like Python.
call_method="call"
if pr.metadata.call_method == "__call__"
else pr.metadata.call_method,
).SerializeToString(),
pr.args,
),
pr.metadata,
)
def send_request_python(
self, pr: PendingRequest, *, with_rejection: bool
) -> ActorReplicaResult:
"""Send the request to a Python replica."""
if with_rejection:
# Call a separate handler that may reject the request.
# This handler is *always* a streaming call and the first message will
# be a system message that accepts or rejects.
method = self._actor_handle.handle_request_with_rejection.options(
num_returns="streaming"
)
elif pr.metadata.is_streaming:
method = self._actor_handle.handle_request_streaming.options(
num_returns="streaming"
)
else:
method = self._actor_handle.handle_request
obj_ref_gen = method.remote(pickle.dumps(pr.metadata), *pr.args, **pr.kwargs)
return ActorReplicaResult(
obj_ref_gen, pr.metadata, with_rejection=with_rejection
)
@PublicAPI(stability="alpha")
|
ActorReplicaWrapper
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/__init__.py
|
{
"start": 16757,
"end": 17596
}
|
class ____(CPPObject):
object_type = 'function'
doc_field_types = [
*CPPObject.doc_field_types,
GroupedField(
'parameter',
label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
can_collapse=True,
),
GroupedField(
'exceptions',
label=_('Throws'),
rolename='expr',
names=('throws', 'throw', 'exception'),
can_collapse=True,
),
GroupedField(
'retval',
label=_('Return values'),
names=('retvals', 'retval'),
can_collapse=True,
),
Field(
'returnvalue',
label=_('Returns'),
has_arg=False,
names=('returns', 'return'),
),
]
|
CPPFunctionObject
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 575184,
"end": 577334
}
|
class ____(Response):
"""
Response of tasks.validate endpoint.
"""
_service = "tasks"
_action = "validate"
_version = "2.23"
_schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
response_mapping = {
GetByIdRequest: GetByIdResponse,
GetAllRequest: GetAllResponse,
GetTypesRequest: GetTypesResponse,
CloneRequest: CloneResponse,
AddOrUpdateModelRequest: AddOrUpdateModelResponse,
DeleteModelsRequest: DeleteModelsResponse,
CreateRequest: CreateResponse,
ValidateRequest: ValidateResponse,
UpdateRequest: UpdateResponse,
UpdateBatchRequest: UpdateBatchResponse,
EditRequest: EditResponse,
ResetRequest: ResetResponse,
ResetManyRequest: ResetManyResponse,
DeleteManyRequest: DeleteManyResponse,
DeleteRequest: DeleteResponse,
ArchiveRequest: ArchiveResponse,
ArchiveManyRequest: ArchiveManyResponse,
UnarchiveManyRequest: UnarchiveManyResponse,
StartedRequest: StartedResponse,
StopRequest: StopResponse,
StopManyRequest: StopManyResponse,
StoppedRequest: StoppedResponse,
FailedRequest: FailedResponse,
CloseRequest: CloseResponse,
PublishRequest: PublishResponse,
PublishManyRequest: PublishManyResponse,
EnqueueRequest: EnqueueResponse,
EnqueueManyRequest: EnqueueManyResponse,
DequeueRequest: DequeueResponse,
DequeueManyRequest: DequeueManyResponse,
SetRequirementsRequest: SetRequirementsResponse,
CompletedRequest: CompletedResponse,
PingRequest: PingResponse,
AddOrUpdateArtifactsRequest: AddOrUpdateArtifactsResponse,
DeleteArtifactsRequest: DeleteArtifactsResponse,
GetHyperParamsRequest: GetHyperParamsResponse,
EditHyperParamsRequest: EditHyperParamsResponse,
DeleteHyperParamsRequest: DeleteHyperParamsResponse,
GetConfigurationsRequest: GetConfigurationsResponse,
GetConfigurationNamesRequest: GetConfigurationNamesResponse,
EditConfigurationRequest: EditConfigurationResponse,
DeleteConfigurationRequest: DeleteConfigurationResponse,
ShareRequest: ShareResponse,
MoveRequest: MoveResponse,
}
|
ValidateResponse
|
python
|
python-jsonschema__jsonschema
|
jsonschema/tests/test_validators.py
|
{
"start": 76473,
"end": 77472
}
|
class ____(TestCase):
"""
Threading-related functionality tests.
jsonschema doesn't promise thread safety, and its validation behavior
across multiple threads may change at any time, but that means it isn't
safe to share *validators* across threads, not that anytime one has
multiple threads that jsonschema won't work (it certainly is intended to).
These tests ensure that this minimal level of functionality continues to
work.
"""
def test_validation_across_a_second_thread(self):
failed = []
def validate():
try:
validators.validate(instance=37, schema=True)
except: # pragma: no cover # noqa: E722
failed.append(sys.exc_info())
validate() # just verify it succeeds
from threading import Thread
thread = Thread(target=validate)
thread.start()
thread.join()
self.assertEqual((thread.is_alive(), failed), (False, []))
|
TestThreading
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 231048,
"end": 232253
}
|
class ____(Response):
"""
Response of tasks.edit_configuration endpoint.
:param updated: Indicates if the task was updated successfully
:type updated: int
"""
_service = "tasks"
_action = "edit_configuration"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(EditConfigurationResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
EditConfigurationResponse
|
python
|
PrefectHQ__prefect
|
src/prefect/events/actions.py
|
{
"start": 4494,
"end": 4898
}
|
class ____(Action):
"""Send a notification when an Automation is triggered"""
type: Literal["send-notification"] = "send-notification"
block_document_id: UUID = Field(
description="The identifier of the notification block to use"
)
subject: str = Field("Prefect automated notification")
body: str = Field(description="The text of the notification to send")
|
SendNotification
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/requires_virtual/package.py
|
{
"start": 217,
"end": 519
}
|
class ____(Package):
"""Package that requires a virtual dependency and is registered
as an external.
"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("2.0", md5="abcdef0123456789abcdef0123456789")
depends_on("stuff")
|
RequiresVirtual
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/inputs.py
|
{
"start": 7069,
"end": 7352
}
|
class ____(graphene.InputObjectType):
partitionNames = graphene.InputField(non_null_list(graphene.String))
assetSelection = graphene.InputField(non_null_list(GrapheneAssetKeyInput))
class Meta:
name = "AssetBackfillPreviewParams"
|
GrapheneAssetBackfillPreviewParams
|
python
|
numba__llvmlite
|
llvmlite/tests/test_refprune.py
|
{
"start": 8161,
"end": 10346
}
|
class ____(BaseTestByIR):
refprune_bitmask = llvm.RefPruneSubpasses.DIAMOND
per_diamond_1 = r"""
define void @main(i8* %ptr) {
bb_A:
call void @NRT_incref(i8* %ptr)
br label %bb_B
bb_B:
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_diamond_1(self):
mod, stats = self.check(self.per_diamond_1)
self.assertEqual(stats.diamond, 2)
per_diamond_2 = r"""
define void @main(i8* %ptr, i1 %cond) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
br label %bb_D
bb_C:
br label %bb_D
bb_D:
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_diamond_2(self):
mod, stats = self.check(self.per_diamond_2)
self.assertEqual(stats.diamond, 2)
per_diamond_3 = r"""
define void @main(i8* %ptr, i1 %cond) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
br label %bb_D
bb_C:
call void @NRT_decref(i8* %ptr) ; reject because of decref in diamond
br label %bb_D
bb_D:
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_diamond_3(self):
mod, stats = self.check(self.per_diamond_3)
self.assertEqual(stats.diamond, 0)
per_diamond_4 = r"""
define void @main(i8* %ptr, i1 %cond) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
call void @NRT_incref(i8* %ptr) ; extra incref will not affect prune
br label %bb_D
bb_C:
br label %bb_D
bb_D:
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_diamond_4(self):
mod, stats = self.check(self.per_diamond_4)
self.assertEqual(stats.diamond, 2)
per_diamond_5 = r"""
define void @main(i8* %ptr, i1 %cond) {
bb_A:
call void @NRT_incref(i8* %ptr)
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
br label %bb_D
bb_C:
br label %bb_D
bb_D:
call void @NRT_decref(i8* %ptr)
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_diamond_5(self):
mod, stats = self.check(self.per_diamond_5)
self.assertEqual(stats.diamond, 4)
|
TestDiamond
|
python
|
pytorch__pytorch
|
test/export/test_sparse.py
|
{
"start": 1415,
"end": 1506
}
|
class ____(torch.nn.Module):
def forward(self, x):
return x.to_dense()
|
ToDenseNet
|
python
|
wandb__wandb
|
wandb/sdk/internal/file_stream.py
|
{
"start": 4034,
"end": 10455
}
|
class ____(DefaultFilePolicy):
r"""File stream policy for removing carriage-return erased characters.
This is what a terminal does. We use it for console output to reduce the amount of
data we need to send over the network (eg. for progress bars), while preserving the
output's appearance in the web app.
CR stands for "carriage return", for the character \r. It tells the terminal to move
the cursor back to the start of the current line. Progress bars (like tqdm) use \r
repeatedly to overwrite a line with newer updates. This gives the illusion of the
progress bar filling up in real-time.
"""
def __init__(self, start_chunk_id: int = 0) -> None:
super().__init__(start_chunk_id=start_chunk_id)
self._prev_chunk = None
self.global_offset = 0
# cr refers to carriage return \r
self.stderr = StreamCRState()
self.stdout = StreamCRState()
@staticmethod
def get_consecutive_offsets(console: Dict[int, str]) -> List[List[int]]:
"""Compress consecutive line numbers into an interval.
Args:
console: Dict[int, str] which maps offsets (line numbers) to lines of text.
It represents a mini version of our console dashboard on the UI.
Returns:
A list of intervals (we compress consecutive line numbers into an interval).
Example:
>>> console = {2: "", 3: "", 4: "", 5: "", 10: "", 11: "", 20: ""}
>>> get_consecutive_offsets(console)
[(2, 5), (10, 11), (20, 20)]
"""
offsets = sorted(list(console.keys()))
intervals: List = []
for i, num in enumerate(offsets):
if i == 0:
intervals.append([num, num])
continue
largest = intervals[-1][1]
if num == largest + 1:
intervals[-1][1] = num
else:
intervals.append([num, num])
return intervals
@staticmethod
def split_chunk(chunk: Chunk) -> Tuple[str, str]:
r"""Split chunks.
Args:
chunk: object with two fields: filename (str) & data (str)
`chunk.data` is a str containing the lines we want. It usually contains \n or \r or both.
`chunk.data` has two possible formats (for the two streams - stdout and stderr):
- "2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n"
- "ERROR 2020-08-25T20:38:36.895321 this is my line of text\nsecond line\nthird\n".
Here's another example with a carriage return \r.
- "ERROR 2020-08-25T20:38:36.895321 \r progress bar\n"
Returns:
A 2-tuple of strings.
First str is prefix, either "ERROR {timestamp} " or "{timestamp} ".
Second str is the rest of the string.
Example:
>>> chunk = Chunk(
... filename="output.log",
... data="ERROR 2020-08-25T20:38 this is my line of text\n",
... )
>>> split_chunk(chunk)
("ERROR 2020-08-25T20:38 ", "this is my line of text\n")
"""
prefix = ""
token, rest = chunk.data.split(" ", 1)
if token == "ERROR":
prefix += token + " "
token, rest = rest.split(" ", 1)
prefix += token + " "
return prefix, rest
def process_chunks(self, chunks: List[Chunk]) -> List["ProcessedChunk"]:
r"""Process chunks.
Args:
chunks: List of Chunk objects. See description of chunk above in `split_chunk(...)`.
Returns:
List[Dict]. Each dict in the list contains two keys: an `offset` which holds the line number
and `content` which maps to a list of consecutive lines starting from that offset.
`offset` here means global line number in our console on the UI.
Example:
>>> chunks = [
Chunk("output.log", "ERROR 2020-08-25T20:38 this is my line of text\nboom\n"),
Chunk("output.log", "2020-08-25T20:38 this is test\n"),
]
>>> process_chunks(chunks)
[
{"offset": 0, "content": [
"ERROR 2020-08-25T20:38 this is my line of text\n",
"ERROR 2020-08-25T20:38 boom\n",
"2020-08-25T20:38 this is test\n"
]
}
]
"""
# Dict[int->str], each offset (line number) mapped to a line.
# Represents a mini-version of our console pane on the UI.
console = {}
sep = os.linesep
for c in chunks:
prefix, logs_str = self.split_chunk(c)
logs = logs_str.split(sep)
for line in logs:
stream = self.stderr if prefix.startswith("ERROR ") else self.stdout
if line.startswith("\r"):
# line starting with \r will always overwrite a previous offset.
offset: int = (
stream.cr
if (stream.found_cr and stream.cr is not None)
else (stream.last_normal or 0)
)
stream.cr = offset
stream.found_cr = True
console[offset] = prefix + line[1:] + "\n"
# Usually logs_str = "\r progress bar\n" for progress bar updates.
# If instead logs_str = "\r progress bar\n text\n text\n",
# treat this as the end of a progress bar and reset accordingly.
if (
logs_str.count(sep) > 1
and logs_str.replace(sep, "").count("\r") == 1
):
stream.found_cr = False
elif line:
console[self.global_offset] = prefix + line + "\n"
stream.last_normal = self.global_offset
self.global_offset += 1
intervals = self.get_consecutive_offsets(console)
ret = []
for a, b in intervals:
processed_chunk: ProcessedChunk = {
"offset": self._chunk_id + a,
"content": [console[i] for i in range(a, b + 1)],
}
ret.append(processed_chunk)
return ret
|
CRDedupeFilePolicy
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/conjecture/shrinking/string.py
|
{
"start": 562,
"end": 946
}
|
class ____(Collection):
def __init__(self, initial, predicate, *, intervals, **kwargs):
super().__init__(
list(initial),
lambda val: predicate("".join(val)),
to_order=intervals.index_from_char_in_shrink_order,
from_order=intervals.char_in_shrink_order,
ElementShrinker=Integer,
**kwargs,
)
|
String
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/scoping.py
|
{
"start": 2762,
"end": 4153
}
|
class ____(Protocol):
"""Describes the type applied to a class-level
:meth:`_orm.scoped_session.query_property` attribute.
.. versionadded:: 2.0.5
"""
def __get__(self, instance: Any, owner: Type[_T]) -> Query[_T]: ...
_O = TypeVar("_O", bound=object)
__all__ = ["scoped_session"]
@create_proxy_methods(
Session,
":class:`_orm.Session`",
":class:`_orm.scoping.scoped_session`",
classmethods=["object_session", "identity_key"],
methods=[
"__contains__",
"__iter__",
"add",
"add_all",
"begin",
"begin_nested",
"close",
"reset",
"commit",
"connection",
"delete",
"delete_all",
"execute",
"expire",
"expire_all",
"expunge",
"expunge_all",
"flush",
"get",
"get_one",
"get_bind",
"is_modified",
"bulk_save_objects",
"bulk_insert_mappings",
"bulk_update_mappings",
"merge",
"merge_all",
"query",
"refresh",
"rollback",
"scalar",
"scalars",
],
attributes=[
"bind",
"dirty",
"deleted",
"new",
"identity_map",
"is_active",
"autoflush",
"no_autoflush",
"info",
"execution_options",
],
)
|
QueryPropertyDescriptor
|
python
|
sphinx-doc__sphinx
|
doc/development/tutorials/examples/todo.py
|
{
"start": 573,
"end": 4070
}
|
class ____(SphinxDirective):
# this enables content in the directive
has_content = True
def run(self):
targetid = 'todo-%d' % self.env.new_serialno('todo')
targetnode = nodes.target('', '', ids=[targetid])
todo_node = todo('\n'.join(self.content))
todo_node += nodes.title(_('Todo'), _('Todo'))
todo_node += self.parse_content_to_nodes()
if not hasattr(self.env, 'todo_all_todos'):
self.env.todo_all_todos = []
self.env.todo_all_todos.append({
'docname': self.env.current_document.docname,
'lineno': self.lineno,
'todo': todo_node.deepcopy(),
'target': targetnode,
})
return [targetnode, todo_node]
def purge_todos(app, env, docname):
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [
todo for todo in env.todo_all_todos if todo['docname'] != docname
]
def merge_todos(app, env, docnames, other):
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
if hasattr(other, 'todo_all_todos'):
env.todo_all_todos.extend(other.todo_all_todos)
def process_todo_nodes(app, doctree, fromdocname):
if not app.config.todo_include_todos:
for node in doctree.findall(todo):
node.parent.remove(node)
# Replace all todolist nodes with a list of the collected todos.
# Augment each todo with a backlink to the original location.
env = app.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
for node in doctree.findall(todolist):
if not app.config.todo_include_todos:
node.replace_self([])
continue
content = []
for todo_info in env.todo_all_todos:
para = nodes.paragraph()
filename = env.doc2path(todo_info['docname'], base=None)
description = _(
'(The original entry is located in %s, line %d and can be found '
) % (filename, todo_info['lineno'])
para += nodes.Text(description)
# Create a reference
newnode = nodes.reference('', '')
innernode = nodes.emphasis(_('here'), _('here'))
newnode['refdocname'] = todo_info['docname']
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todo_info['docname']
)
newnode['refuri'] += '#' + todo_info['target']['refid']
newnode.append(innernode)
para += newnode
para += nodes.Text('.)')
# Insert into the todolist
content.extend((
todo_info['todo'],
para,
))
node.replace_self(content)
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_config_value('todo_include_todos', False, 'html')
app.add_node(todolist)
app.add_node(
todo,
html=(visit_todo_node, depart_todo_node),
latex=(visit_todo_node, depart_todo_node),
text=(visit_todo_node, depart_todo_node),
)
app.add_directive('todo', TodoDirective)
app.add_directive('todolist', TodolistDirective)
app.connect('doctree-resolved', process_todo_nodes)
app.connect('env-purge-doc', purge_todos)
app.connect('env-merge-info', merge_todos)
return {
'version': '0.1',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
TodoDirective
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_optimize.py
|
{
"start": 5357,
"end": 39342
}
|
class ____(CheckOptimize):
def test_cg(self):
# conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='CG', jac=self.grad,
options=opts)
params, fopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams,
self.grad, (), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c == 9, self.funccalls.c
assert self.gradcalls.c == 7, self.gradcalls.c
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace.t[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_cg_cornercase(self):
def f(r):
return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
# Check several initial guesses. (Too far away from the
# minimum, the function ends up in the flat region of exp.)
for x0 in np.linspace(-0.75, 3, 71):
sol = optimize.minimize(f, [x0], method='CG')
assert sol.success
assert_allclose(sol.x, [0.5], rtol=1e-5)
def test_bfgs(self):
# Broyden-Fletcher-Goldfarb-Shanno optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS', args=(),
options=opts)
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, gopt, Hopt,
func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c == 10, self.funccalls.c
assert self.gradcalls.c == 8, self.gradcalls.c
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace.t[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_hess_inv0_neg(self):
# Ensure that BFGS does not accept neg. def. initial inverse
# Hessian estimate.
with pytest.raises(ValueError, match="'hess_inv0' matrix isn't "
"positive definite."):
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
opts = {'disp': self.disp, 'hess_inv0': -np.eye(5)}
optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(),
options=opts)
def test_bfgs_hess_inv0_semipos(self):
# Ensure that BFGS does not accept semi pos. def. initial inverse
# Hessian estimate.
with pytest.raises(ValueError, match="'hess_inv0' matrix isn't "
"positive definite."):
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
hess_inv0 = np.eye(5)
hess_inv0[0, 0] = 0
opts = {'disp': self.disp, 'hess_inv0': hess_inv0}
optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(),
options=opts)
def test_bfgs_hess_inv0_sanity(self):
# Ensure that BFGS handles `hess_inv0` parameter correctly.
fun = optimize.rosen
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
opts = {'disp': self.disp, 'hess_inv0': 1e-2 * np.eye(5)}
res = optimize.minimize(fun, x0=x0, method='BFGS', args=(),
options=opts)
res_true = optimize.minimize(fun, x0=x0, method='BFGS', args=(),
options={'disp': self.disp})
assert_allclose(res.fun, res_true.fun, atol=1e-6)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_bfgs_infinite(self):
# Test corner case where -Inf is the minimum. See gh-2019.
def func(x):
return -np.e ** (-x)
def fprime(x):
return -func(x)
x0 = [0]
with np.errstate(over='ignore'):
if self.use_wrapper:
opts = {'disp': self.disp}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
assert not np.isfinite(func(x))
def test_bfgs_xrtol(self):
# test for #17345 to test xrtol parameter
x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
res = optimize.minimize(optimize.rosen,
x0, method='bfgs', options={'xrtol': 1e-3})
ref = optimize.minimize(optimize.rosen,
x0, method='bfgs', options={'gtol': 1e-3})
assert res.nit != ref.nit
def test_bfgs_c1(self):
# test for #18977 insufficiently low value of c1 leads to precision loss
# for poor starting parameters
x0 = [10.3, 20.7, 10.8, 1.9, -1.2]
res_c1_small = optimize.minimize(optimize.rosen,
x0, method='bfgs', options={'c1': 1e-8})
res_c1_big = optimize.minimize(optimize.rosen,
x0, method='bfgs', options={'c1': 1e-1})
assert res_c1_small.nfev > res_c1_big.nfev
def test_bfgs_c2(self):
# test that modification of c2 parameter
# results in different number of iterations
x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
res_default = optimize.minimize(optimize.rosen,
x0, method='bfgs', options={'c2': .9})
res_mod = optimize.minimize(optimize.rosen,
x0, method='bfgs', options={'c2': 1e-2})
assert res_default.nit > res_mod.nit
@pytest.mark.parametrize(["c1", "c2"], [[0.5, 2],
[-0.1, 0.1],
[0.2, 0.1]])
def test_invalid_c1_c2(self, c1, c2):
with pytest.raises(ValueError, match="'c1' and 'c2'"):
x0 = [10.3, 20.7, 10.8, 1.9, -1.2]
optimize.minimize(optimize.rosen, x0, method='cg',
options={'c1': c1, 'c2': c2})
def test_powell(self):
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# params[0] does not affect the objective function
assert_allclose(params[1:], self.solution[1:], atol=5e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g., MKL, data alignment
# etc., affect the rounding error.
#
assert self.funccalls.c <= 116 + 20, self.funccalls.c
assert self.gradcalls.c == 0, self.gradcalls.c
@pytest.mark.xfail(reason="This part of test_powell fails on some "
"platforms, but the solution returned by powell is "
"still valid.")
def test_powell_gh14014(self):
# This part of test_powell started failing on some CI platforms;
# see gh-14014. Since the solution is still correct and the comments
# in test_powell suggest that small differences in the bits are known
# to change the "trace" of the solution, seems safe to xfail to get CI
# green now and investigate later.
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[34:39],
[[0.72949016, -0.44156936, 0.47100962],
[0.72949016, -0.44156936, 0.48052496],
[1.45898031, -0.88313872, 0.95153458],
[0.72949016, -0.44156936, 0.47576729],
[1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_powell_bounded(self):
# Powell (direction set) optimization routine
# same as test_powell above, but with bounds
bounds = [(-np.pi, np.pi) for _ in self.startparams]
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
bounds=bounds,
method='Powell', options=opts)
params, func_calls = (res['x'], res['nfev'])
assert func_calls == self.funccalls.c
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6, rtol=1e-5)
# The exact evaluation count is sensitive to numerical error, and
# floating-point computations are not bit-for-bit reproducible
# across machines, and when using e.g. MKL, data alignment etc.
# affect the rounding error.
# It takes 155 calls on my machine, but we can add the same +20
# margin as is used in `test_powell`
assert self.funccalls.c <= 155 + 20
assert self.gradcalls.c == 0
def test_neldermead(self):
# Nelder-Mead simplex algorithm
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['nit'], res['nfev'],
res['status'])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c == 167, self.funccalls.c
assert self.gradcalls.c == 0, self.gradcalls.c
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace.t[76:78],
[[0.1928968, -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex(self):
# Nelder-Mead simplex algorithm
simplex = np.zeros((4, 3))
simplex[...] = self.startparams
for j in range(3):
simplex[j+1, j] += 0.1
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': True, 'initial_simplex': simplex}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = (res['x'],
res['fun'],
res['nit'],
res['nfev'],
res['status'])
assert_allclose(res['allvecs'][0], simplex[0])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.17.0. Don't allow them to increase.
assert self.funccalls.c == 100, self.funccalls.c
assert self.gradcalls.c == 0, self.gradcalls.c
# Ensure that the function behaves the same; this is from SciPy 0.15.0
assert_allclose(self.trace.t[50:52],
[[0.14687474, -0.5103282, 0.48252111],
[0.14474003, -0.5282084, 0.48743951]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex_bad(self):
# Check it fails with a bad simplices
bad_simplices = []
simplex = np.zeros((3, 2))
simplex[...] = self.startparams[:2]
for j in range(2):
simplex[j+1, j] += 0.1
bad_simplices.append(simplex)
simplex = np.zeros((3, 3))
bad_simplices.append(simplex)
for simplex in bad_simplices:
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False, 'initial_simplex': simplex}
assert_raises(ValueError,
optimize.minimize,
self.func,
self.startparams,
args=(),
method='Nelder-mead',
options=opts)
else:
assert_raises(ValueError, optimize.fmin,
self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
def test_neldermead_x0_ub(self):
# checks whether minimisation occurs correctly for entries where
# x0 == ub
# gh19991
def quad(x):
return np.sum(x**2)
res = optimize.minimize(
quad,
[1],
bounds=[(0, 1.)],
method='nelder-mead'
)
assert_allclose(res.x, [0])
res = optimize.minimize(
quad,
[1, 2],
bounds=[(0, 1.), (1, 3.)],
method='nelder-mead'
)
assert_allclose(res.x, [0, 1])
def test_ncg_negative_maxiter(self):
# Regression test for gh-8241
opts = {'maxiter': -1}
result = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)
assert result.status == 1
def test_ncg_zero_xtol(self):
# Regression test for gh-20214
def cosine(x):
return np.cos(x[0])
def jac(x):
return -np.sin(x[0])
x0 = [0.1]
xtol = 0
result = optimize.minimize(cosine,
x0=x0,
jac=jac,
method="newton-cg",
options=dict(xtol=xtol))
assert result.status == 0
assert_almost_equal(result.x[0], np.pi)
def test_ncg(self):
# line-search Newton conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c == 7, self.funccalls.c
assert self.gradcalls.c <= 22, self.gradcalls.c # 0.13.0
# assert self.gradcalls <= 18, self.gradcalls # 0.9.0
# assert self.gradcalls == 18, self.gradcalls # 0.8.0
# assert self.gradcalls == 22, self.gradcalls # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace.t[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self):
# Newton conjugate gradient with Hessian
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess=self.hess,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess=self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c <= 7, self.funccalls.c # gh10673
assert self.gradcalls.c <= 18, self.gradcalls.c # 0.9.0
# assert self.gradcalls == 18, self.gradcalls # 0.8.0
# assert self.gradcalls == 22, self.gradcalls # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace.t[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self):
# Newton conjugate gradient with Hessian times a vector p.
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp=self.hessp,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p=self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c <= 7, self.funccalls.c # gh10673
assert self.gradcalls.c <= 18, self.gradcalls.c # 0.9.0
# assert self.gradcalls == 18, self.gradcalls # 0.8.0
# assert self.gradcalls == 22, self.gradcalls # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace.t[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_cobyqa(self):
# COBYQA method.
if self.use_wrapper:
res = optimize.minimize(
self.func,
self.startparams,
method='cobyqa',
options={'maxiter': self.maxiter, 'disp': self.disp},
)
assert_allclose(res.fun, self.func(self.solution), atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 1.14.0. Don't allow them to increase. The exact evaluation
# count is sensitive to numerical error and floating-point
# computations are not bit-for-bit reproducible across machines. It
# takes 45 calls on my machine, but we can add the same +20 margin
# as is used in `test_powell`
assert self.funccalls.c <= 45 + 20, self.funccalls.c
def test_maxfev_test():
rng = np.random.default_rng(271707100830272976862395227613146332411)
def cost(x):
return rng.random(1) * 1000 # never converged problem
for imaxfev in [1, 10, 50]:
# "TNC" and "L-BFGS-B" also supports max function evaluation, but
# these may violate the limit because of evaluating gradients
# by numerical differentiation. See the discussion in PR #14805.
for method in ['Powell', 'Nelder-Mead']:
result = optimize.minimize(cost, rng.random(10),
method=method,
options={'maxfev': imaxfev})
assert result["nfev"] == imaxfev
def test_wrap_scalar_function_with_validation():
def func_(x):
return x
fcalls, func = optimize._optimize.\
_wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
for i in range(5):
func(np.asarray(i))
assert fcalls[0] == i+1
msg = "Too many function calls"
with assert_raises(optimize._optimize._MaxFuncCallError, match=msg):
func(np.asarray(i)) # exceeded maximum function call
fcalls, func = optimize._optimize.\
_wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
msg = "The user-provided objective function must return a scalar value."
with assert_raises(ValueError, match=msg):
func(np.array([1, 1]))
def test_obj_func_returns_scalar():
match = ("The user-provided "
"objective function must "
"return a scalar value.")
with assert_raises(ValueError, match=match):
optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')
def test_neldermead_iteration_num():
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
res = optimize._minimize._minimize_neldermead(optimize.rosen, x0,
xatol=1e-8)
assert res.nit <= 339
def test_neldermead_respect_fp():
# Nelder-Mead should respect the fp type of the input + function
x0 = np.array([5.0, 4.0]).astype(np.float32)
def rosen_(x):
assert x.dtype == np.float32
return optimize.rosen(x)
optimize.minimize(rosen_, x0, method='Nelder-Mead')
def test_neldermead_xatol_fatol():
# gh4484
# test we can call with fatol, xatol specified
def func(x):
return x[0] ** 2 + x[1] ** 2
optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
xatol=1e-3, fatol=1e-3)
def test_neldermead_adaptive():
def func(x):
return np.sum(x ** 2)
p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,
0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,
0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]
res = optimize.minimize(func, p0, method='Nelder-Mead')
assert_equal(res.success, False)
res = optimize.minimize(func, p0, method='Nelder-Mead',
options={'adaptive': True})
assert_equal(res.success, True)
def test_bounded_powell_outsidebounds():
# With the bounded Powell method if you start outside the bounds the final
# should still be within the bounds (provided that the user doesn't make a
# bad choice for the `direc` argument).
def func(x):
return np.sum(x ** 2)
bounds = (-1, 1), (-1, 1), (-1, 1)
x0 = [-4, .5, -.8]
# we're starting outside the bounds, so we should get a warning
with pytest.warns(optimize.OptimizeWarning):
res = optimize.minimize(func, x0, bounds=bounds, method="Powell")
assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)
assert_equal(res.success, True)
assert_equal(res.status, 0)
# However, now if we change the `direc` argument such that the
# set of vectors does not span the parameter space, then we may
# not end up back within the bounds. Here we see that the first
# parameter cannot be updated!
direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
# we're starting outside the bounds, so we should get a warning
with pytest.warns(optimize.OptimizeWarning):
res = optimize.minimize(func, x0,
bounds=bounds, method="Powell",
options={'direc': direc})
assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)
assert_equal(res.success, False)
assert_equal(res.status, 4)
def test_bounded_powell_vs_powell():
# here we test an example where the bounded Powell method
# will return a different result than the standard Powell
# method.
# first we test a simple example where the minimum is at
# the origin and the minimum that is within the bounds is
# larger than the minimum at the origin.
def func(x):
return np.sum(x ** 2)
bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)
x0 = [-2.1, -5.2, 1.9, 0, -2]
options = {'ftol': 1e-10, 'xtol': 1e-10}
res_powell = optimize.minimize(func, x0, method="Powell", options=options)
assert_allclose(res_powell.x, 0., atol=1e-6)
assert_allclose(res_powell.fun, 0., atol=1e-6)
res_bounded_powell = optimize.minimize(func, x0, options=options,
bounds=bounds,
method="Powell")
p = np.array([-1, -0.1, 1, 0, -2])
assert_allclose(res_bounded_powell.x, p, atol=1e-6)
assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
# now we test bounded Powell but with a mix of inf bounds.
bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)
res_bounded_powell = optimize.minimize(func, x0, options=options,
bounds=bounds,
method="Powell")
p = np.array([-1, -0.1, 1, 0, -2])
assert_allclose(res_bounded_powell.x, p, atol=1e-6)
assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
# next we test an example where the global minimum is within
# the bounds, but the bounded Powell method performs better
# than the standard Powell method.
def func(x):
t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])
t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))
return t**2
bounds = [(-2, 5)] * 3
x0 = [-0.5, -0.5, -0.5]
res_powell = optimize.minimize(func, x0, method="Powell")
res_bounded_powell = optimize.minimize(func, x0,
bounds=bounds,
method="Powell")
assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)
assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
# next we test the previous example where the we provide Powell
# with (-inf, inf) bounds, and compare it to providing Powell
# with no bounds. They should end up the same.
bounds = [(-np.inf, np.inf)] * 3
res_bounded_powell = optimize.minimize(func, x0,
bounds=bounds,
method="Powell")
assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)
assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)
assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)
# now test when x0 starts outside of the bounds.
x0 = [45.46254415, -26.52351498, 31.74830248]
bounds = [(-2, 5)] * 3
# we're starting outside the bounds, so we should get a warning
with pytest.warns(optimize.OptimizeWarning):
res_bounded_powell = optimize.minimize(func, x0,
bounds=bounds,
method="Powell")
assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
def test_onesided_bounded_powell_stability():
# When the Powell method is bounded on only one side, a
# np.tan transform is done in order to convert it into a
# completely bounded problem. Here we do some simple tests
# of one-sided bounded Powell where the optimal solutions
# are large to test the stability of the transformation.
kwargs = {'method': 'Powell',
'bounds': [(-np.inf, 1e6)] * 3,
'options': {'ftol': 1e-8, 'xtol': 1e-8}}
x0 = [1, 1, 1]
# df/dx is constant.
def f(x):
return -np.sum(x)
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -3e6, atol=1e-4)
# df/dx gets smaller and smaller.
def f(x):
return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -(3e6) ** (0.1))
# df/dx gets larger and larger.
def f(x):
return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)
# df/dx gets larger for some of the variables and smaller for others.
def f(x):
t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)
t *= (1 if np.all(x > 0) else -1)
return t
kwargs['bounds'] = [(-np.inf, 1e3)] * 3
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)
|
CheckOptimizeParameterized
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/odnoklassniki/views.py
|
{
"start": 629,
"end": 2059
}
|
class ____(OAuth2Adapter):
provider_id = "odnoklassniki"
access_token_url = "https://api.odnoklassniki.ru/oauth/token.do" # nosec
authorize_url = "https://www.odnoklassniki.ru/oauth/authorize"
profile_url = "https://api.odnoklassniki.ru/fb.do"
access_token_method = "POST" # nosec
def complete_login(self, request, app, token, **kwargs):
data = {
"method": "users.getCurrentUser",
"access_token": token.token,
"fields": ",".join(USER_FIELDS),
"format": "JSON",
"application_key": app.key,
}
# Ondoklassniki prescribes a weak algo
suffix = md5(
"{0:s}{1:s}".format(data["access_token"], app.secret).encode("utf-8")
).hexdigest() # nosec
check_list = sorted(
["{0:s}={1:s}".format(k, v) for k, v in data.items() if k != "access_token"]
)
data["sig"] = md5(
("".join(check_list) + suffix).encode("utf-8")
).hexdigest() # nosec
response = (
get_adapter().get_requests_session().get(self.profile_url, params=data)
)
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(OdnoklassnikiOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(OdnoklassnikiOAuth2Adapter)
|
OdnoklassnikiOAuth2Adapter
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/exporters/PrintExporter.py
|
{
"start": 263,
"end": 2586
}
|
class ____(Exporter):
Name = "Printer"
def __init__(self, item):
Exporter.__init__(self, item)
tr = self.getTargetRect()
self.params = Parameter.create(name='params', type='group', children=[
{'name': 'width', 'title': translate("Exporter", 'width'), 'type': 'float', 'value': 0.1,
'limits': (0, None), 'suffix': 'm', 'siPrefix': True},
{'name': 'height', 'title': translate("Exporter", 'height'), 'type': 'float',
'value': (0.1 * tr.height()) / tr.width(), 'limits': (0, None), 'suffix': 'm', 'siPrefix': True},
])
self.params.param('width').sigValueChanged.connect(self.widthChanged)
self.params.param('height').sigValueChanged.connect(self.heightChanged)
def widthChanged(self):
sr = self.getSourceRect()
ar = sr.height() / sr.width()
self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
def heightChanged(self):
sr = self.getSourceRect()
ar = sr.width() / sr.height()
self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
def parameters(self):
return self.params
def export(self, fileName=None):
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
dialog = QtGui.QPrintDialog(printer)
dialog.setWindowTitle(translate('Exporter', "Print Document"))
if dialog.exec_() != QtWidgets.QDialog.DialogCode.Accepted:
return
res = QtGui.QGuiApplication.primaryScreen().physicalDotsPerInchX()
printer.setResolution(res)
rect = printer.pageRect()
center = rect.center()
h = self.params['height'] * res * 100. / 2.54
w = self.params['width'] * res * 100. / 2.54
x = center.x() - w/2.
y = center.y() - h/2.
targetRect = QtCore.QRect(x, y, w, h)
sourceRect = self.getSourceRect()
painter = QtGui.QPainter(printer)
try:
self.setExportMode(True, {'painter': painter})
self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
finally:
self.setExportMode(False)
painter.end()
#PrintExporter.register()
|
PrintExporter
|
python
|
openai__openai-python
|
src/openai/resources/realtime/realtime.py
|
{
"start": 6793,
"end": 7196
}
|
class ____:
def __init__(self, realtime: Realtime) -> None:
self._realtime = realtime
@cached_property
def client_secrets(self) -> ClientSecretsWithRawResponse:
return ClientSecretsWithRawResponse(self._realtime.client_secrets)
@cached_property
def calls(self) -> CallsWithRawResponse:
return CallsWithRawResponse(self._realtime.calls)
|
RealtimeWithRawResponse
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/multi_kernel.py
|
{
"start": 18906,
"end": 19961
}
|
class ____(MultiKernel):
"""
Version of multi-kernel that generates kernels based on specified size hints.
Currently only performs 1-d search over hints; doesn't perform combinatorial n-d search
if n > 1 dynamic dimensions are specified.
e.g. matmul([s0, s1], [s1, s2]) with size-hints [64, 256] only generates 2 kernels,
based on tuning shapes ([64, 64], [64, 64]) and ([256, 256], [256, 256])
"""
def __init__(self, kernels):
assert isinstance(kernels, dict) and len(kernels) >= 1
self.kernels, self.kernel_shape_keys = [], []
for shape_key, kernel in kernels.items():
self.kernels.append(kernel)
self.kernel_shape_keys.append(shape_key)
self.kernel_name = V.graph.wrapper_code.multi_kernel_state.define_kernel(
self.kernels, self.kernel_shape_keys
)
# need this since some code in inductor check if the kernel object has an args
# attribute to decide if it's a non-null kernel.
self.args = object()
|
SizeHintMultiKernel
|
python
|
davidhalter__jedi
|
jedi/inference/arguments.py
|
{
"start": 4449,
"end": 4604
}
|
class ____:
def unpack(self, funcdef=None):
raise NotImplementedError
def get_calling_nodes(self):
return []
|
_AbstractArgumentsMixin
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sentence-similarity-iii.py
|
{
"start": 29,
"end": 731
}
|
class ____(object):
def areSentencesSimilar(self, sentence1, sentence2):
"""
:type sentence1: str
:type sentence2: str
:rtype: bool
"""
if len(sentence1) > len(sentence2):
sentence1, sentence2 = sentence2, sentence1
count = 0
for idx in (lambda x:x, lambda x:-1-x):
for i in xrange(len(sentence1)+1):
c1 = sentence1[idx(i)] if i != len(sentence1) else ' '
c2 = sentence2[idx(i)] if i != len(sentence2) else ' '
if c1 != c2:
break
if c1 == ' ':
count += 1
return count >= sentence1.count(' ')+1
|
Solution
|
python
|
apache__airflow
|
airflow-core/src/airflow/metrics/otel_logger.py
|
{
"start": 10563,
"end": 11357
}
|
class ____:
"""Stores sync gauge instrument and current value to support delta feature."""
def __init__(self, meter, name: str, tags: Attributes):
self.attributes = tags
otel_safe_name = _get_otel_safe_name(name)
self.gauge = meter.create_gauge(name=otel_safe_name)
log.debug("Created %s as type: %s", otel_safe_name, _type_as_str(self.gauge))
self.value = DEFAULT_GAUGE_VALUE
self.gauge.set(self.value, attributes=self.attributes)
def set_value(self, new_value: int | float, delta: bool):
"""Delta feature to increase old value with new value and metric export."""
if delta:
new_value += self.value
self.value = new_value
self.gauge.set(new_value, attributes=self.attributes)
|
InternalGauge
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 94109,
"end": 95335
}
|
class ____(TypedDict, total=False):
"""
:class:`altair.ErrorBarConfig` ``TypedDict`` wrapper.
Parameters
----------
extent
The extent of the rule. Available options include:
* ``"ci"``: Extend the rule to the 95% bootstrapped confidence interval of the mean.
* ``"stderr"``: The size of rule are set to the value of standard error, extending
from the mean.
* ``"stdev"``: The size of rule are set to the value of standard deviation,
extending from the mean.
* ``"iqr"``: Extend the rule to the q1 and q3.
**Default value:** ``"stderr"``.
rule
size
Size of the ticks of an error bar
thickness
Thickness of the ticks and the bar of an error bar
ticks
"""
extent: ErrorBarExtent_T
rule: (
bool
| BarConfigKwds
| AreaConfigKwds
| LineConfigKwds
| MarkConfigKwds
| RectConfigKwds
| TickConfigKwds
)
size: float
thickness: float
ticks: (
bool
| BarConfigKwds
| AreaConfigKwds
| LineConfigKwds
| MarkConfigKwds
| RectConfigKwds
| TickConfigKwds
)
|
ErrorBarConfigKwds
|
python
|
ray-project__ray
|
rllib/env/tests/test_env_runner_group.py
|
{
"start": 203,
"end": 4008
}
|
class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_foreach_env_runner(self):
"""Test to make sure basic sychronous calls to remote workers work."""
ws = EnvRunnerGroup(
config=(
PPOConfig().environment("CartPole-v1").env_runners(num_env_runners=2)
),
)
modules = ws.foreach_env_runner(
lambda w: w.module,
local_env_runner=True,
)
# 3 policies including the one from the local worker.
self.assertEqual(len(modules), 3)
for m in modules:
self.assertIsInstance(m, RLModule)
modules = ws.foreach_env_runner(
lambda w: w.module,
local_env_runner=False,
)
# 2 policies from only the remote workers.
self.assertEqual(len(modules), 2)
ws.stop()
def test_foreach_env_runner_return_obj_refss(self):
"""Test to make sure return_obj_refs parameter works."""
ws = EnvRunnerGroup(
config=(
PPOConfig().environment("CartPole-v1").env_runners(num_env_runners=2)
),
)
module_refs = ws.foreach_env_runner(
lambda w: isinstance(w.module, RLModule),
local_env_runner=False,
return_obj_refs=True,
)
# 2 policy references from remote workers.
self.assertEqual(len(module_refs), 2)
self.assertTrue(isinstance(module_refs[0], ray.ObjectRef))
self.assertTrue(isinstance(module_refs[1], ray.ObjectRef))
ws.stop()
def test_foreach_env_runner_async(self):
"""Test to make sure basic asychronous calls to remote workers work."""
ws = EnvRunnerGroup(
config=(
PPOConfig().environment("CartPole-v1").env_runners(num_env_runners=2)
),
)
# Fired async request against both remote workers.
self.assertEqual(
ws.foreach_env_runner_async(
lambda w: isinstance(w.module, RLModule),
),
2,
)
remote_results = ws.fetch_ready_async_reqs(timeout_seconds=None)
self.assertEqual(len(remote_results), 2)
for p in remote_results:
# p is in the format of (worker_id, result).
# First is the id of the remote worker.
self.assertTrue(p[0] in [1, 2])
# Next is the actual policy.
self.assertTrue(p[1])
ws.stop()
def test_foreach_env_runner_async_fetch_ready(self):
"""Test to make sure that test_foreach_env_runner_async_fetch_ready works."""
ws = EnvRunnerGroup(
config=(
PPOConfig()
.environment("CartPole-v1")
.env_runners(num_env_runners=2, rollout_fragment_length=1)
),
)
# Sample from both env runners.
# First call to foreach_env_runner_async_fetch_ready should not return ready results.
self.assertEqual(
len(
ws.foreach_env_runner_async_fetch_ready(
lambda w: w.sample(),
tag="sample",
)
),
0,
)
time.sleep(1)
# Second call to foreach_env_runner_async_fetch_ready should return ready results.
self.assertEqual(
len(
ws.foreach_env_runner_async_fetch_ready(
lambda w: w.sample(),
tag="sample",
)
),
2,
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
TestEnvRunnerGroup
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/pyodbc.py
|
{
"start": 20167,
"end": 20285
}
|
class ____(_MSJsonPathType):
def get_dbapi_type(self, dbapi):
return dbapi.SQL_WVARCHAR
|
_JSONPathType_pyodbc
|
python
|
h5py__h5py
|
h5py/tests/test_file2.py
|
{
"start": 8805,
"end": 9712
}
|
class ____(TestCase):
def populate(self, f):
for i in range(100):
# Mix group and dataset creation.
if i % 10 == 0:
f.create_group(str(i))
else:
f[str(i)] = [i]
def test_track_order(self):
fname = self.mktemp()
f = h5py.File(fname, 'w', track_order=True) # creation order
self.populate(f)
self.assertEqual(list(f), [str(i) for i in range(100)])
f.close()
# Check order tracking after reopening the file
f2 = h5py.File(fname)
self.assertEqual(list(f2), [str(i) for i in range(100)])
def test_no_track_order(self):
fname = self.mktemp()
f = h5py.File(fname, 'w', track_order=False) # name alphanumeric
self.populate(f)
self.assertEqual(list(f),
sorted([str(i) for i in range(100)]))
|
TestTrackOrder
|
python
|
instagram__MonkeyType
|
monkeytype/stubs.py
|
{
"start": 17547,
"end": 17915
}
|
class ____(Stub):
def __init__(
self,
name: str,
typ: type,
) -> None:
self.name = name
self.typ = typ
def render(self, prefix: str = "") -> str:
return f"{prefix}{self.name}: {render_annotation(self.typ)}"
def __repr__(self) -> str:
return f"AttributeStub({self.name}, {self.typ})"
|
AttributeStub
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_dag.py
|
{
"start": 76471,
"end": 96975
}
|
class ____:
def _clean(self):
clear_db_dags()
clear_db_assets()
clear_db_runs()
clear_db_dag_bundles()
clear_db_teams()
def setup_method(self):
self._clean()
def teardown_method(self):
self._clean()
def test_dags_needing_dagruns_not_too_early(self, testing_dag_bundle):
dag = DAG(dag_id="far_future_dag", schedule=None, start_date=timezone.datetime(2038, 1, 1))
EmptyOperator(task_id="dummy", dag=dag, owner="airflow")
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
bundle_name="testing",
max_active_tasks=1,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=timezone.datetime(2038, 1, 2),
is_stale=False,
)
session.add(orm_dag)
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
session.rollback()
session.close()
def test_dags_needing_dagruns_assets(self, dag_maker, session):
asset = Asset(uri="test://asset", group="test-group")
with dag_maker(
session=session,
dag_id="my_dag",
max_active_runs=1,
schedule=[asset],
start_date=pendulum.now().add(days=-2),
) as dag:
EmptyOperator(task_id="dummy")
# there's no queue record yet, so no runs needed at this time.
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
# add queue records so we'll need a run
dag_model = session.query(DagModel).filter(DagModel.dag_id == dag.dag_id).one()
asset_model: AssetModel = dag_model.schedule_assets[0]
session.add(AssetDagRunQueue(asset_id=asset_model.id, target_dag_id=dag_model.dag_id))
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == [dag_model]
# create run so we don't need a run anymore (due to max active runs)
dag_maker.create_dagrun(
run_type=DagRunType.ASSET_TRIGGERED,
state=DagRunState.QUEUED,
logical_date=pendulum.now("UTC"),
)
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
# increase max active runs and we should now need another run
dag_maker.dag_model.max_active_runs = 2
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == [dag_model]
def test_dags_needing_dagruns_asset_aliases(self, dag_maker, session):
# link asset_alias hello_alias to asset hello
asset_model = AssetModel(uri="hello")
asset_alias_model = AssetAliasModel(name="hello_alias")
asset_alias_model.assets.append(asset_model)
session.add_all([asset_model, asset_alias_model])
session.commit()
with dag_maker(
session=session,
dag_id="my_dag",
max_active_runs=1,
schedule=[AssetAlias(name="hello_alias")],
start_date=pendulum.now().add(days=-2),
):
EmptyOperator(task_id="dummy")
# there's no queue record yet, so no runs needed at this time.
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
# add queue records so we'll need a run
dag_model = dag_maker.dag_model
session.add(AssetDagRunQueue(asset_id=asset_model.id, target_dag_id=dag_model.dag_id))
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == [dag_model]
# create run so we don't need a run anymore (due to max active runs)
dag_maker.create_dagrun(
run_type=DagRunType.ASSET_TRIGGERED,
state=DagRunState.QUEUED,
logical_date=pendulum.now("UTC"),
)
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
# increase max active runs and we should now need another run
dag_maker.dag_model.max_active_runs = 2
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == [dag_model]
@pytest.mark.parametrize("ref", [Asset.ref(name="1"), Asset.ref(uri="s3://bucket/assets/1")])
@pytest.mark.want_activate_assets
@pytest.mark.need_serialized_dag
def test_dags_needing_dagruns_asset_refs(self, dag_maker, session, ref):
asset = Asset(name="1", uri="s3://bucket/assets/1")
with dag_maker(dag_id="producer", schedule=None, session=session):
op = EmptyOperator(task_id="op", outlets=asset)
dr: DagRun = dag_maker.create_dagrun()
with dag_maker(dag_id="consumer", schedule=ref, max_active_runs=1):
pass
# Nothing from the upstream yet, no runs needed.
assert session.scalars(select(AssetDagRunQueue.target_dag_id)).all() == []
query, _ = DagModel.dags_needing_dagruns(session)
assert query.all() == []
# Upstream triggered, now we need a run.
ti = dr.get_task_instance("op")
ti.refresh_from_task(op)
ti.run()
assert session.scalars(select(AssetDagRunQueue.target_dag_id)).all() == ["consumer"]
query, _ = DagModel.dags_needing_dagruns(session)
assert [dm.dag_id for dm in query] == ["consumer"]
@pytest.mark.want_activate_assets
@pytest.mark.need_serialized_dag
def test_dags_needing_dagruns_checking_stale_adrq(self, dag_maker, session):
asset = Asset(name="1", uri="s3://bucket/assets/1")
dag_id_to_test = "test"
# Dag 'test' depends on an outlet in 'producer'.
with dag_maker(dag_id="producer", schedule=None, session=session):
op = EmptyOperator(task_id="op", outlets=asset)
dr = dag_maker.create_dagrun()
outlet_ti = dr.get_task_instance("op")
outlet_ti.refresh_from_task(op)
with dag_maker(dag_id=dag_id_to_test, schedule=asset, session=session):
pass
# An adrq should be created when the outlet task is run.
outlet_ti.run()
query, _ = DagModel.dags_needing_dagruns(session)
assert [dm.dag_id for dm in query] == [dag_id_to_test]
assert session.scalars(select(AssetDagRunQueue.target_dag_id)).all() == [dag_id_to_test]
# Now the dag is changed to NOT depend on 'producer'.
# Rerunning dags_needing_dagruns should clear up that adrq.
with dag_maker(dag_id=dag_id_to_test, schedule=None, session=session):
pass
query, _ = DagModel.dags_needing_dagruns(session)
assert query.all() == []
assert session.scalars(select(AssetDagRunQueue.target_dag_id)).all() == []
def test_max_active_runs_not_none(self, testing_dag_bundle):
dag = DAG(
dag_id="test_max_active_runs_not_none",
schedule=None,
start_date=timezone.datetime(2038, 1, 1),
)
EmptyOperator(task_id="dummy", dag=dag, owner="airflow")
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
bundle_name="testing",
has_task_concurrency_limits=False,
next_dagrun=None,
next_dagrun_create_after=None,
is_stale=False,
)
# assert max_active_runs updated
assert orm_dag.max_active_runs == 16
session.add(orm_dag)
session.flush()
assert orm_dag.max_active_runs is not None
session.rollback()
session.close()
def test_dags_needing_dagruns_only_unpaused(self, testing_dag_bundle):
"""
We should never create dagruns for unpaused DAGs
"""
dag = DAG(dag_id="test_dags", schedule=None, start_date=DEFAULT_DATE)
EmptyOperator(task_id="dummy", dag=dag, owner="airflow")
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
bundle_name="testing",
has_task_concurrency_limits=False,
next_dagrun=DEFAULT_DATE,
next_dagrun_create_after=DEFAULT_DATE + timedelta(days=1),
is_stale=False,
)
session.merge(orm_dag)
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
needed = query.all()
assert [d.dag_id for d in needed] == [orm_dag.dag_id]
orm_dag.is_paused = True
session.merge(orm_dag)
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
session.rollback()
session.close()
def test_dags_needing_dagruns_doesnot_send_dagmodel_with_import_errors(self, session, testing_dag_bundle):
"""
We check that has_import_error is false for dags
being set to scheduler to create dagruns
"""
dag = DAG(dag_id="test_dags", schedule=None, start_date=DEFAULT_DATE)
EmptyOperator(task_id="dummy", dag=dag, owner="airflow")
orm_dag = DagModel(
dag_id=dag.dag_id,
bundle_name="testing",
has_task_concurrency_limits=False,
next_dagrun=DEFAULT_DATE,
next_dagrun_create_after=DEFAULT_DATE + timedelta(days=1),
is_stale=False,
)
assert not orm_dag.has_import_errors
session.add(orm_dag)
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
needed = query.all()
assert needed == [orm_dag]
orm_dag.has_import_errors = True
session.merge(orm_dag)
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
assert dag_models == []
session.rollback()
session.close()
def test_relative_fileloc(self, session, testing_dag_bundle):
rel_path = "test_assets.py"
bundle_path = TEST_DAGS_FOLDER
file_path = bundle_path / rel_path
bag = DagBag(dag_folder=file_path, bundle_path=bundle_path)
dag = bag.get_dag("dag_with_skip_task")
bundle_name = "testing"
dag_model = DagModel(
dag_id=dag.dag_id,
bundle_name=bundle_name,
)
session.merge(dag_model)
session.flush()
sync_dag_to_db(dag, session=session)
assert dag.fileloc == str(file_path)
assert dag.relative_fileloc == str(rel_path)
SerializedDagModel.write_dag(
LazyDeserializedDAG.from_dag(dag),
bundle_name=bundle_name,
session=session,
)
dm = session.get(DagModel, dag.dag_id)
assert dm.fileloc == str(file_path)
assert dm.relative_fileloc == str(rel_path)
sdm = session.scalar(select(SerializedDagModel).where(SerializedDagModel.dag_id == dag.dag_id))
assert sdm.dag.fileloc == str(file_path)
assert sdm.dag.relative_fileloc == str(rel_path)
def test__processor_dags_folder(self, session, testing_dag_bundle):
"""Only populated after deserializtion"""
bundle_name = "testing"
dag = DAG(dag_id="test", schedule=None)
dag.fileloc = "/abc/test.py"
dag_model = DagModel(
dag_id=dag.dag_id,
bundle_name=bundle_name,
)
session.merge(dag_model)
session.flush()
scheduler_dag = sync_dag_to_db(dag)
assert scheduler_dag._processor_dags_folder == settings.DAGS_FOLDER
sdm = SerializedDagModel.get(dag.dag_id, session)
assert sdm.dag._processor_dags_folder == settings.DAGS_FOLDER
@pytest.mark.need_serialized_dag
def test_dags_needing_dagruns_triggered_date_by_dag_queued_times(self, session, dag_maker):
asset1 = Asset(uri="test://asset1", group="test-group")
asset2 = Asset(uri="test://asset2", name="test_asset_2", group="test-group")
for dag_id, asset in [("assets-1", asset1), ("assets-2", asset2)]:
with dag_maker(dag_id=dag_id, start_date=timezone.utcnow(), session=session):
EmptyOperator(task_id="task", outlets=[asset])
dr = dag_maker.create_dagrun()
asset_id = session.query(AssetModel.id).filter_by(uri=asset.uri).scalar()
session.add(
AssetEvent(
asset_id=asset_id,
source_task_id="task",
source_dag_id=dr.dag_id,
source_run_id=dr.run_id,
source_map_index=-1,
)
)
asset1_id = session.query(AssetModel.id).filter_by(uri=asset1.uri).scalar()
asset2_id = session.query(AssetModel.id).filter_by(uri=asset2.uri).scalar()
with dag_maker(dag_id="assets-consumer-multiple", schedule=[asset1, asset2]) as dag:
pass
session.flush()
session.add_all(
[
AssetDagRunQueue(asset_id=asset1_id, target_dag_id=dag.dag_id, created_at=DEFAULT_DATE),
AssetDagRunQueue(
asset_id=asset2_id,
target_dag_id=dag.dag_id,
created_at=DEFAULT_DATE + timedelta(hours=1),
),
]
)
session.flush()
query, triggered_date_by_dag = DagModel.dags_needing_dagruns(session)
assert len(triggered_date_by_dag) == 1
assert dag.dag_id in triggered_date_by_dag
last_queued_time = triggered_date_by_dag[dag.dag_id]
assert last_queued_time == DEFAULT_DATE + timedelta(hours=1)
def test_asset_expression(self, session: Session, testing_dag_bundle) -> None:
dag = DAG(
dag_id="test_dag_asset_expression",
schedule=AssetAny(
Asset(uri="s3://dag1/output_1.txt", extra={"hi": "bye"}, group="test-group"),
AssetAll(
Asset(
uri="s3://dag2/output_1.txt",
name="test_asset_2",
extra={"hi": "bye"},
group="test-group",
),
Asset("s3://dag3/output_3.txt", extra={"hi": "bye"}, group="test-group"),
AssetAll(
AssetAll(
Asset("s3://dag3/output_4.txt", extra={"hi": "bye"}, group="test-group"),
Asset("s3://dag3/output_5.txt", extra={"hi": "bye"}, group="test-group"),
),
Asset("s3://dag3/output_6.txt", extra={"hi": "bye"}, group="test-group"),
),
),
AssetAlias(name="test_name", group="test-group"),
),
start_date=datetime.datetime.min,
)
SerializedDAG.bulk_write_to_db("testing", None, [dag], session=session)
expression = session.scalars(select(DagModel.asset_expression).filter_by(dag_id=dag.dag_id)).one()
assert expression == {
"any": [
{
"asset": {
"uri": "s3://dag1/output_1.txt",
"name": "s3://dag1/output_1.txt",
"group": "test-group",
"id": ANY,
}
},
{
"all": [
{
"asset": {
"uri": "s3://dag2/output_1.txt",
"name": "test_asset_2",
"group": "test-group",
"id": ANY,
}
},
{
"asset": {
"uri": "s3://dag3/output_3.txt",
"name": "s3://dag3/output_3.txt",
"group": "test-group",
"id": ANY,
}
},
{
"all": [
{
"all": [
{
"asset": {
"uri": "s3://dag3/output_4.txt",
"name": "s3://dag3/output_4.txt",
"group": "test-group",
"id": ANY,
}
},
{
"asset": {
"uri": "s3://dag3/output_5.txt",
"name": "s3://dag3/output_5.txt",
"group": "test-group",
"id": ANY,
}
},
],
},
{
"asset": {
"uri": "s3://dag3/output_6.txt",
"name": "s3://dag3/output_6.txt",
"group": "test-group",
"id": ANY,
},
},
]
},
]
},
{"alias": {"name": "test_name", "group": "test-group"}},
]
}
def test_get_team_name(self, testing_team):
session = settings.Session()
dag_bundle = DagBundleModel(name="testing-team")
dag_bundle.teams.append(testing_team)
session.add(dag_bundle)
session.flush()
dag_id = "test_get_team_name"
dag = DAG(dag_id, schedule=None)
orm_dag = DagModel(
dag_id=dag.dag_id,
bundle_name="testing-team",
is_stale=False,
)
session.add(orm_dag)
session.flush()
assert DagModel.get_dagmodel(dag_id) is not None
assert DagModel.get_team_name(dag_id, session=session) == "testing"
def test_get_team_name_no_team(self, testing_team):
session = settings.Session()
dag_bundle = DagBundleModel(name="testing")
session.add(dag_bundle)
session.flush()
dag_id = "test_get_team_name_no_team"
dag = DAG(dag_id, schedule=None)
orm_dag = DagModel(
dag_id=dag.dag_id,
bundle_name="testing",
is_stale=False,
)
session.add(orm_dag)
session.flush()
assert DagModel.get_dagmodel(dag_id) is not None
assert DagModel.get_team_name(dag_id, session=session) is None
def test_get_dag_id_to_team_name_mapping(self, testing_team):
session = settings.Session()
bundle1 = DagBundleModel(name="bundle1")
bundle1.teams.append(testing_team)
bundle2 = DagBundleModel(name="bundle2")
session.add(bundle1)
session.add(bundle2)
session.flush()
dag_id1 = "test_dag1"
dag1 = DAG(dag_id1, schedule=None)
orm_dag1 = DagModel(
dag_id=dag1.dag_id,
bundle_name="bundle1",
is_stale=False,
)
dag_id2 = "test_dag2"
dag2 = DAG(dag_id2, schedule=None)
orm_dag2 = DagModel(
dag_id=dag2.dag_id,
bundle_name="bundle2",
is_stale=False,
)
session.add(orm_dag1)
session.add(orm_dag2)
session.flush()
assert DagModel.get_dag_id_to_team_name_mapping([dag_id1, dag_id2], session=session) == {
dag_id1: "testing"
}
|
TestDagModel
|
python
|
skorch-dev__skorch
|
skorch/_version.py
|
{
"start": 4568,
"end": 7833
}
|
class ____(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version setuptools prior to
# its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
|
LegacyVersion
|
python
|
Netflix__metaflow
|
test/test_included_modules/my_decorators.py
|
{
"start": 1474,
"end": 1590
}
|
class ____(StepMutator):
def mutate(self, mutable_step):
mutable_step.add_decorator(time_step)
|
AddTimeStep
|
python
|
pydata__xarray
|
xarray/tests/test_dataset.py
|
{
"start": 300843,
"end": 303747
}
|
class ____:
def test_from_numpy(self) -> None:
ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", [4, 5, 6])})
assert_identical(ds.as_numpy(), ds)
@requires_dask
def test_from_dask(self) -> None:
ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", [4, 5, 6])})
ds_chunked = ds.chunk(1)
assert_identical(ds_chunked.as_numpy(), ds.compute())
@requires_pint
def test_from_pint(self) -> None:
from pint import Quantity
arr = np.array([1, 2, 3])
ds = xr.Dataset(
{"a": ("x", Quantity(arr, units="Pa"))},
coords={"lat": ("x", Quantity(arr + 3, units="m"))},
)
expected = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", arr + 3)})
assert_identical(ds.as_numpy(), expected)
@requires_sparse
def test_from_sparse(self) -> None:
import sparse
arr = np.diagflat([1, 2, 3])
sparr = sparse.COO.from_numpy(arr)
ds = xr.Dataset(
{"a": (["x", "y"], sparr)}, coords={"elev": (("x", "y"), sparr + 3)}
)
expected = xr.Dataset(
{"a": (["x", "y"], arr)}, coords={"elev": (("x", "y"), arr + 3)}
)
assert_identical(ds.as_numpy(), expected)
@requires_cupy
def test_from_cupy(self) -> None:
import cupy as cp
arr = np.array([1, 2, 3])
ds = xr.Dataset(
{"a": ("x", cp.array(arr))}, coords={"lat": ("x", cp.array(arr + 3))}
)
expected = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", arr + 3)})
assert_identical(ds.as_numpy(), expected)
@requires_dask
@requires_pint
def test_from_pint_wrapping_dask(self) -> None:
import dask
from pint import Quantity
arr = np.array([1, 2, 3])
d = dask.array.from_array(arr)
ds = xr.Dataset(
{"a": ("x", Quantity(d, units="Pa"))},
coords={"lat": ("x", Quantity(d, units="m") * 2)},
)
result = ds.as_numpy()
expected = xr.Dataset({"a": ("x", arr)}, coords={"lat": ("x", arr * 2)})
assert_identical(result, expected)
def test_string_keys_typing() -> None:
"""Tests that string keys to `variables` are permitted by mypy"""
da = xr.DataArray(np.arange(10), dims=["x"])
ds = xr.Dataset(dict(x=da))
mapping = {"y": da}
ds.assign(variables=mapping)
def test_transpose_error() -> None:
# Transpose dataset with list as argument
# Should raise error
ds = xr.Dataset({"foo": (("x", "y"), [[21]]), "bar": (("x", "y"), [[12]])})
with pytest.raises(
TypeError,
match=re.escape(
"transpose requires dim to be passed as multiple arguments. Expected `'y', 'x'`. Received `['y', 'x']` instead"
),
):
ds.transpose(["y", "x"]) # type: ignore[arg-type]
|
TestNumpyCoercion
|
python
|
huggingface__transformers
|
src/transformers/models/table_transformer/modeling_table_transformer.py
|
{
"start": 12733,
"end": 13606
}
|
class ____(nn.Module):
"""
This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
"""
def __init__(self, conv_encoder, position_embedding):
super().__init__()
self.conv_encoder = conv_encoder
self.position_embedding = position_embedding
def forward(self, pixel_values, pixel_mask):
# send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples
out = self.conv_encoder(pixel_values, pixel_mask)
pos = []
for feature_map, mask in out:
# position encoding
pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
return out, pos
# Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->TableTransformer
|
TableTransformerConvModel
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/handlers/condition/issue_occurrences_handler.py
|
{
"start": 349,
"end": 1519
}
|
class ____(DataConditionHandler[WorkflowEventData]):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES
comparison_json_schema = {
"type": "object",
"properties": {
"value": {"type": "integer", "minimum": 0},
},
"required": ["value"],
"additionalProperties": False,
}
@staticmethod
def evaluate_value(event_data: WorkflowEventData, comparison: Any) -> bool:
group: Group = event_data.group
try:
value = int(comparison["value"])
except (TypeError, ValueError, KeyError):
return False
# This value is slightly delayed due to us batching writes to times_seen. We attempt to work
# around this by including pending updates from buffers to improve accuracy.
try:
issue_occurrences: int = group.times_seen_with_pending
except AssertionError:
# This is a fallback for when times_seen_pending has not yet been set
issue_occurrences = group.times_seen
return bool(issue_occurrences >= value)
|
IssueOccurrencesConditionHandler
|
python
|
vyperlang__vyper
|
vyper/venom/passes/memmerging.py
|
{
"start": 419,
"end": 685
}
|
class ____:
start: int
length: int
@property
def end(self):
return self.start + self.length
def overlaps(self, other):
a = max(self.start, other.start)
b = min(self.end, other.end)
return a < b
@dataclass
|
_Interval
|
python
|
apache__airflow
|
providers/common/sql/tests/unit/common/sql/operators/test_sql.py
|
{
"start": 33554,
"end": 35626
}
|
class ____:
def setup_method(self):
self.task_id = "test_task"
self.conn_id = "default_conn"
def _construct_operator(self, sql, pass_value, tolerance=None):
dag = DAG("test_dag", schedule=None, start_date=datetime.datetime(2017, 1, 1))
return SQLValueCheckOperator(
dag=dag,
task_id=self.task_id,
conn_id=self.conn_id,
sql=sql,
pass_value=pass_value,
tolerance=tolerance,
)
def test_pass_value_template_string(self):
pass_value_str = "2018-03-22"
operator = self._construct_operator("select date from tab1;", "{{ ds }}")
operator.render_template_fields({"ds": pass_value_str})
assert operator.task_id == self.task_id
assert operator.pass_value == pass_value_str
def test_pass_value_template_string_float(self):
pass_value_float = 4.0
operator = self._construct_operator("select date from tab1;", pass_value_float)
operator.render_template_fields({})
assert operator.task_id == self.task_id
assert operator.pass_value == str(pass_value_float)
@mock.patch.object(SQLValueCheckOperator, "get_db_hook")
def test_execute_pass(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [10]
mock_get_db_hook.return_value = mock_hook
sql = "select value from tab1 limit 1;"
operator = self._construct_operator(sql, 5, 1)
operator.execute(None)
mock_hook.get_first.assert_called_once_with(sql, None)
@mock.patch.object(SQLValueCheckOperator, "get_db_hook")
def test_execute_fail(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("select value from tab1 limit 1;", 5, 1)
with pytest.raises(AirflowException, match="Tolerance:100.0%"):
operator.execute(context=MagicMock())
|
TestValueCheckOperator
|
python
|
spyder-ide__spyder
|
spyder/utils/workers.py
|
{
"start": 867,
"end": 2345
}
|
class ____(QObject):
"""
Generic python worker for running python code on threads.
For running processes (via QProcess) use the ProcessWorker.
"""
sig_started = Signal(object)
sig_finished = Signal(object, object, object) # worker, stdout, stderr
def __init__(self, func, args, kwargs):
"""Generic python worker for running python code on threads."""
super().__init__()
self.func = func
self.args = args
self.kwargs = kwargs
self._is_finished = False
self._started = False
def is_finished(self):
"""Return True if worker status is finished otherwise return False."""
return self._is_finished
def start(self):
"""Start the worker (emits sig_started signal with worker as arg)."""
if not self._started:
self.sig_started.emit(self)
self._started = True
def terminate(self):
"""Mark the worker as finished."""
self._is_finished = True
def _start(self):
"""Start process worker for given method args and kwargs."""
error = None
output = None
try:
output = self.func(*self.args, **self.kwargs)
except Exception as err:
error = err
if not self._is_finished:
try:
self.sig_finished.emit(self, output, error)
except RuntimeError:
pass
self._is_finished = True
|
PythonWorker
|
python
|
astropy__astropy
|
astropy/io/ascii/basic.py
|
{
"start": 5552,
"end": 5984
}
|
class ____(Basic):
"""Tab-separated table.
Unlike the :class:`Basic` reader, whitespace is not stripped from the
beginning and end of either lines or individual column values.
Example::
col1 <tab> col2 <tab> col3
# Comment line
1 <tab> 2 <tab> 5
"""
_format_name = "tab"
_description = "Basic table with tab-separated values"
header_class = TabHeader
data_class = TabData
|
Tab
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/widgets/toolbars.py
|
{
"start": 10846,
"end": 11207
}
|
class ____:
def __init__(self) -> None:
self.container = ConditionalContainer(
content=Window(
_CompletionsToolbarControl(), height=1, style="class:completion-toolbar"
),
filter=has_completions,
)
def __pt_container__(self) -> Container:
return self.container
|
CompletionsToolbar
|
python
|
pypa__pip
|
src/pip/_internal/metadata/__init__.py
|
{
"start": 3023,
"end": 5824
}
|
class ____(Protocol):
NAME: Literal["importlib", "pkg_resources"]
Distribution: type[BaseDistribution]
Environment: type[BaseEnvironment]
@functools.cache
def select_backend() -> Backend:
if _should_use_importlib_metadata():
from . import importlib
return cast(Backend, importlib)
_emit_pkg_resources_deprecation_if_needed()
from . import pkg_resources
return cast(Backend, pkg_resources)
def get_default_environment() -> BaseEnvironment:
"""Get the default representation for the current environment.
This returns an Environment instance from the chosen backend. The default
Environment instance should be built from ``sys.path`` and may use caching
to share instance state across calls.
"""
return select_backend().Environment.default()
def get_environment(paths: list[str] | None) -> BaseEnvironment:
"""Get a representation of the environment specified by ``paths``.
This returns an Environment instance from the chosen backend based on the
given import paths. The backend must build a fresh instance representing
the state of installed distributions when this function is called.
"""
return select_backend().Environment.from_paths(paths)
def get_directory_distribution(directory: str) -> BaseDistribution:
"""Get the distribution metadata representation in the specified directory.
This returns a Distribution instance from the chosen backend based on
the given on-disk ``.dist-info`` directory.
"""
return select_backend().Distribution.from_directory(directory)
def get_wheel_distribution(
wheel: Wheel, canonical_name: NormalizedName
) -> BaseDistribution:
"""Get the representation of the specified wheel's distribution metadata.
This returns a Distribution instance from the chosen backend based on
the given wheel's ``.dist-info`` directory.
:param canonical_name: Normalized project name of the given wheel.
"""
return select_backend().Distribution.from_wheel(wheel, canonical_name)
def get_metadata_distribution(
metadata_contents: bytes,
filename: str,
canonical_name: str,
) -> BaseDistribution:
"""Get the dist representation of the specified METADATA file contents.
This returns a Distribution instance from the chosen backend sourced from the data
in `metadata_contents`.
:param metadata_contents: Contents of a METADATA file within a dist, or one served
via PEP 658.
:param filename: Filename for the dist this metadata represents.
:param canonical_name: Normalized project name of the given dist.
"""
return select_backend().Distribution.from_metadata_file_contents(
metadata_contents,
filename,
canonical_name,
)
|
Backend
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-docarray/llama_index/vector_stores/docarray/base.py
|
{
"start": 543,
"end": 6759
}
|
class ____(VectorStore, ABC):
"""
DocArray Vector Store Base Class.
This is an abstract base class for creating a DocArray vector store.
The subclasses should implement _init_index and _find_docs_to_be_removed methods.
"""
# for mypy. will get initialized by the subclass.
_index: Any
_schema: Any
_ref_docs: Dict[str, List[str]]
stores_text: bool = True
flat_metadata: bool = False
def _update_ref_docs(self, docs) -> None: # type: ignore[no-untyped-def]
pass
@abstractmethod
def _init_index(self, **kwargs: Any): # type: ignore[no-untyped-def]
"""
Initializes the index.
This method should be overridden by the subclasses.
"""
@abstractmethod
def _find_docs_to_be_removed(self, doc_id: str) -> List[str]:
"""
Finds the documents to be removed from the vector store.
Args:
doc_id (str): Document ID that should be removed.
Returns:
List[str]: List of document IDs to be removed.
This is an abstract method and needs to be implemented in any concrete subclass.
"""
@property
def client(self) -> Any:
"""Get client."""
return None
def num_docs(self) -> int:
"""
Retrieves the number of documents in the index.
Returns:
int: The number of documents in the index.
"""
return self._index.num_docs()
@staticmethod
def _get_schema(**embeddings_params: Any) -> Type:
"""
Fetches the schema for DocArray indices.
Args:
**embeddings_params: Variable length argument list for the embedding.
Returns:
DocArraySchema: Schema for a DocArray index.
"""
from docarray import BaseDoc
from docarray.typing import ID, NdArray
class DocArraySchema(BaseDoc):
id: Optional[ID] = None
text: Optional[str] = None
metadata: Optional[dict] = None
embedding: NdArray = Field(**embeddings_params)
return DocArraySchema
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Adds nodes to the vector store.
Args:
nodes (List[BaseNode]): List of nodes with embeddings.
Returns:
List[str]: List of document IDs added to the vector store.
"""
from docarray import DocList
# check to see if empty document list was passed
if len(nodes) == 0:
return []
docs = DocList[self._schema]( # type: ignore[name-defined]
self._schema(
id=node.node_id,
metadata=node_to_metadata_dict(node, flat_metadata=self.flat_metadata),
text=node.get_content(metadata_mode=MetadataMode.NONE),
embedding=node.get_embedding(),
)
for node in nodes
)
self._index.index(docs)
logger.info(f"Successfully added {len(docs)} documents to the index")
if self._ref_docs is not None:
self._update_ref_docs(docs)
return [doc.id for doc in docs]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Deletes a document from the vector store.
Args:
ref_doc_id (str): Document ID to be deleted.
**delete_kwargs (Any): Additional arguments to pass to the delete method.
"""
docs_to_be_removed = self._find_docs_to_be_removed(ref_doc_id)
if not docs_to_be_removed:
logger.warning(f"Document with doc_id {ref_doc_id} not found")
return
del self._index[docs_to_be_removed]
logger.info(f"Deleted {len(docs_to_be_removed)} documents from the index")
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Queries the vector store and retrieves the results.
Args:
query (VectorStoreQuery): Query for the vector store.
Returns:
VectorStoreQueryResult: Result of the query from vector store.
"""
if query.filters:
# only for ExactMatchFilters
filter_query = {
"metadata__" + filter.key: {"$eq": filter.value}
for filter in query.filters.legacy_filters()
}
query = (
self._index.build_query() # get empty query object
.find(
query=self._schema(embedding=np.array(query.query_embedding)),
search_field="embedding",
limit=query.similarity_top_k,
) # add vector similarity search
.filter(filter_query=filter_query) # add filter search
.build() # build the query
)
# execute the combined query and return the results
docs, scores = self._index.execute_query(query)
else:
docs, scores = self._index.find(
query=self._schema(embedding=np.array(query.query_embedding)),
search_field="embedding",
limit=query.similarity_top_k,
)
nodes, ids = [], []
for doc in docs:
try:
node = metadata_dict_to_node(doc.metadata)
node.text = doc.text
except Exception:
# TODO: legacy metadata support
metadata, node_info, relationships = legacy_metadata_dict_to_node(
doc.metadata
)
node = TextNode(
id_=doc.id,
text=doc.text,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships=relationships,
)
nodes.append(node)
ids.append(doc.id)
logger.info(f"Found {len(nodes)} results for the query")
return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores)
|
DocArrayVectorStore
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/models/test_sources.py
|
{
"start": 19081,
"end": 37420
}
|
class ____:
__test__ = is_installed("pandas")
def test_init_dataframe_arg(self) -> None:
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = bms.ColumnDataSource(df)
assert set(df.columns).issubset(set(ds.column_names))
for key in data.keys():
assert isinstance(ds.data[key], np.ndarray)
assert list(df[key]) == list(ds.data[key])
assert isinstance(ds.data['index'], np.ndarray)
assert [0, 1] == list(ds.data['index'])
assert set(ds.column_names) - set(df.columns) == {"index"}
assert ds.length == 2
def test_init_dataframe_column_categoricalindex(self) -> None:
columns = pd.CategoricalIndex(['a', 'b'])
data = [[0,2], [1,3]]
df = pd.DataFrame(columns=columns, data=data)
ds = bms.ColumnDataSource(data=df)
assert set(df.columns).issubset(set(ds.column_names))
for key in columns:
assert isinstance(ds.data[key], np.ndarray)
assert list(df[key]) == list(ds.data[key])
assert isinstance(ds.data['index'], np.ndarray)
assert [0, 1] == list(ds.data['index'])
assert set(ds.column_names) - set(df.columns) == {"index"}
assert ds.length == 2
def test_data_accepts_dataframe_column_categoricalindex(self) -> None:
columns = pd.CategoricalIndex(['a', 'b'])
data = [[0,2], [1,3]]
df = pd.DataFrame(columns=columns, data=data)
ds = bms.ColumnDataSource()
assert ds.data == {}
ds.data = df
assert set(df.columns).issubset(set(ds.column_names))
for key in columns:
assert isinstance(ds.data[key], np.ndarray)
assert list(df[key]) == list(ds.data[key])
assert isinstance(ds.data['index'], np.ndarray)
assert [0, 1] == list(ds.data['index'])
assert set(ds.column_names) - set(df.columns) == {"index"}
assert ds.length == 2
def test_init_dataframe_nonstring_named_column(self) -> None:
data = {1: [1, 2], 2: [2, 3]}
df = pd.DataFrame(data)
with pytest.raises(ValueError, match=r'expected a dict of type.*'):
bms.ColumnDataSource(data=df)
def test_init_dataframe_nonstring_named_multicolumn(self) -> None:
data = {(1, 2): [1, 2], (2, 3): [2, 3]}
df = pd.DataFrame(data)
with pytest.raises(TypeError, match=r'Could not flatten.*'):
bms.ColumnDataSource(data=df)
def test_init_groupby_arg(self) -> None:
group = df.groupby(by=['origin', 'cyl'])
ds = bms.ColumnDataSource(group)
s = group.describe()
assert len(ds.column_names) == 49
assert ds.length == 9
assert isinstance(ds.data['origin_cyl'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
assert isinstance(ds.data[k2], np.ndarray)
assert list(s[key]) == list(ds.data[k2])
def test_data_accepts_groupby_arg(self) -> None:
group = df.groupby(by=['origin', 'cyl'])
ds = bms.ColumnDataSource()
assert ds.data == {}
ds.data = group
s = group.describe()
assert len(ds.column_names) == 49
assert ds.length == 9
assert isinstance(ds.data['origin_cyl'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
assert isinstance(ds.data[k2], np.ndarray)
assert list(s[key]) == list(ds.data[k2])
def test_init_groupby_data_kwarg(self) -> None:
group = df.groupby(by=['origin', 'cyl'])
ds = bms.ColumnDataSource(data=group)
s = group.describe()
assert len(ds.column_names) == 49
assert ds.length == 9
assert isinstance(ds.data['origin_cyl'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
assert isinstance(ds.data[k2], np.ndarray)
assert list(s[key]) == list(ds.data[k2])
def test_init_groupby_with_None_subindex_name(self) -> None:
df = pd.DataFrame({"A": [1, 2, 3, 4] * 2, "B": [10, 20, 30, 40] * 2, "C": range(8)})
group = df.groupby(['A', [10, 20, 30, 40] * 2])
ds = bms.ColumnDataSource(data=group)
s = group.describe()
assert len(ds.column_names) == 17
assert ds.length == 4
assert isinstance(ds.data['index'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
assert isinstance(ds.data[k2], np.ndarray)
assert list(s[key]) == list(ds.data[k2])
def test_data_accepts_groupby_with_None_subindex_name(self) -> None:
df = pd.DataFrame({"A": [1, 2, 3, 4] * 2, "B": [10, 20, 30, 40] * 2, "C": range(8)})
group = df.groupby(['A', [10, 20, 30, 40] * 2])
ds = bms.ColumnDataSource()
assert ds.data == {}
ds.data = group
s = group.describe()
assert len(ds.column_names) == 17
assert ds.length == 4
assert isinstance(ds.data['index'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
assert isinstance(ds.data[k2], np.ndarray)
assert list(s[key]) == list(ds.data[k2])
def test__stream_good_df_with_date_index_data(self) -> None:
df = pd.DataFrame(
index=pd.date_range('now', periods=30, freq='min'),
columns=['A'],
data=np.cumsum(np.random.standard_normal(30), axis=0),
)
ds = bms.ColumnDataSource(data=df)
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
new_df = pd.DataFrame(
index=df.index + pd.to_timedelta('30m'),
columns=df.columns,
data=np.random.standard_normal(30),
)
ds._stream(new_df, "foo", mock_setter)
assert np.array_equal(stuff['args'][2]['index'], new_df.index.values)
assert np.array_equal(stuff['args'][2]['A'], new_df.A.values)
def test__stream_good_dict_of_index_and_series_data(self) -> None:
df = pd.DataFrame(
index=pd.date_range('now', periods=30, freq='min'),
columns=['A'],
data=np.cumsum(np.random.standard_normal(30), axis=0),
)
ds = bms.ColumnDataSource(data=df)
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
new_df = pd.DataFrame(
index=df.index + pd.to_timedelta('30m'),
columns=df.columns,
data=np.random.standard_normal(30),
)
ds._stream({'index': new_df.index, 'A': new_df.A}, "foo", mock_setter)
assert np.array_equal(stuff['args'][2]['index'], new_df.index.values)
assert np.array_equal(stuff['args'][2]['A'], new_df.A.values)
def test__stream_good_dict_of_index_and_series_data_transformed(self) -> None:
df = pd.DataFrame(
index=pd.date_range('now', periods=30, freq='min'),
columns=['A'],
data=np.cumsum(np.random.standard_normal(30), axis=0),
)
ds = bms.ColumnDataSource(data={'index': convert_datetime_array(df.index.values),
'A': df.A})
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
new_df = pd.DataFrame(
index=df.index + pd.to_timedelta('30m'),
columns=df.columns,
data=np.random.standard_normal(30),
)
ds._stream({'index': new_df.index, 'A': new_df.A}, "foo", mock_setter)
assert np.array_equal(stuff['args'][2]['index'], convert_datetime_array(new_df.index.values))
assert np.array_equal(stuff['args'][2]['A'], new_df.A.values)
def _assert_equal_dicts_of_arrays(self, d1, d2):
assert d1.keys() == d2.keys()
for k, v in d1.items():
assert type(v) is np.ndarray
assert np.array_equal(v, d2[k])
def test_stream_dict_to_ds_created_from_df(self) -> None:
data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
ds = bms.ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(dict(a=[11, 12],
b=np.array([21, 22]),
c=pd.Series([31, 32])), 7)
assert len(stream_stuff['args']) == 5
expected_stream_args = ("doc", ds, dict(a=[11, 12],
b=np.array([21, 22]),
c=pd.Series([31, 32])), 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'],
expected_stream_args)):
if i == 2:
assert arg['a'] == ex_arg['a']
del arg['a'], ex_arg['a']
self._assert_equal_dicts_of_arrays(arg, ex_arg)
else:
assert arg == ex_arg
assert stream_stuff['kwargs'] == {}
assert len(notify_owners_stuff['args']) == 1
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11, 12]),
b=np.array([20, 21, 22]),
c=np.array([30, 31, 32])))
def test_stream_series_to_ds_created_from_df(self) -> None:
data = pd.DataFrame(dict(a=[10], b=[20], c=[30]))
ds = bms.ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(pd.Series([11, 21, 31], index=list('abc')), 7)
assert len(stream_stuff['args']) == 5
expected_df = pd.DataFrame(dict(a=np.array([11]),
b=np.array([21]),
c=np.array([31])))
expected_stream_data = expected_df.to_dict('series')
expected_stream_data['index'] = expected_df.index.values
expected_args = ("doc", ds, expected_stream_data, 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
if i == 2:
self._assert_equal_dicts_of_arrays(arg, ex_arg)
else:
assert arg == ex_arg
assert stream_stuff['kwargs'] == {}
assert len(notify_owners_stuff['args']) == 1
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30]),
index=np.array([0])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11]),
b=np.array([20, 21]),
c=np.array([30, 31]),
index=np.array([0, 0])))
def test_stream_df_to_ds_created_from_df_named_index(self) -> None:
data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
ds = bms.ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(pd.DataFrame(dict(a=[11, 12],
b=[21, 22],
c=[31, 32])).set_index('c'), 7)
assert len(stream_stuff['args']) == 5
expected_steam_data = dict(a=np.array([11, 12]),
b=np.array([21, 22]),
c=np.array([31, 32]))
expected_args = ("doc", ds, expected_steam_data, 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
if i == 2:
assert arg.keys() == ex_arg.keys()
for k, v in arg.items():
assert np.array_equal(v, ex_arg[k])
else:
assert stream_stuff['args'][i] == expected_args[i]
assert stream_stuff['kwargs'] == {}
assert len(notify_owners_stuff['args']) == 1
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11, 12]),
b=np.array([20, 21, 22]),
c=np.array([30, 31, 32])))
def test_stream_df_to_ds_created_from_df_default_index(self) -> None:
data = pd.DataFrame(dict(a=[10], b=[20], c=[30]))
ds = bms.ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(pd.DataFrame(dict(a=[11, 12],
b=[21, 22],
c=[31, 32])), 7)
assert len(stream_stuff['args']) == 5
expected_df = pd.DataFrame(dict(a=np.array([11, 12]),
b=np.array([21, 22]),
c=np.array([31, 32])))
expected_stream_data = expected_df.to_dict('series')
expected_stream_data['index'] = expected_df.index.values
expected_args = ("doc", ds, expected_stream_data, 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
if i == 2:
for k, v in arg.items():
assert np.array_equal(v, ex_arg[k])
else:
assert stream_stuff['args'][i] == expected_args[i]
assert stream_stuff['kwargs'] == {}
assert len(notify_owners_stuff['args']) == 1
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30]),
index=np.array([0])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11, 12]),
b=np.array([20, 21, 22]),
c=np.array([30, 31, 32]),
index=np.array([0, 0, 1])))
def test__df_index_name_with_named_index(self) -> None:
df = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
assert bms.ColumnDataSource._df_index_name(df) == "c"
def test__df_index_name_with_unnamed_index(self) -> None:
df = pd.DataFrame(dict(a=[10], b=[20], c=[30]))
assert bms.ColumnDataSource._df_index_name(df) == "index"
def test__df_index_name_with_named_multi_index(self) -> None:
data = io.StringIO("""\
Fruit,Color,Count,Price
Apple,Red,3,$1.29
Apple,Green,9,$0.99
Pear,Red,25,$2.59
Pear,Green,26,$2.79
Lime,Green,99,$0.39
""")
df = pd.read_csv(data).set_index(['Fruit', 'Color'])
assert df.index.names == ['Fruit', 'Color']
assert bms.ColumnDataSource._df_index_name(df) == "Fruit_Color"
def test__df_index_name_with_unnamed_multi_index(self) -> None:
arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
df = pd.DataFrame(np.random.randn(8, 4), index=arrays)
assert df.index.names == [None, None]
assert bms.ColumnDataSource._df_index_name(df) == "index"
|
TestColumnDataSourcePandas
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pep8_naming/N805.py
|
{
"start": 1869,
"end": 1974
}
|
class ____:
def formula(household):
hºusehold(1)
from typing import Protocol
|
RenamingWithNFKC
|
python
|
huggingface__transformers
|
src/transformers/models/falcon/modeling_falcon.py
|
{
"start": 21641,
"end": 27131
}
|
class ____(FalconAttention):
"""
Falcon flash attention module. This module inherits from `FalconAttention` as the weights of the module stays
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
def forward(
self,
hidden_states: torch.Tensor,
alibi: Optional[torch.Tensor],
attention_mask: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
layer_past: Optional[Cache] = None,
use_cache: bool = False,
output_attentions: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
):
fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size]
num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads
# 3 x [batch_size, seq_length, num_heads, head_dim]
(query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
batch_size, query_length, _, _ = query_layer.shape
query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim)
key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
if alibi is None:
cos, sin = position_embeddings
query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin)
if layer_past is not None:
cache_kwargs = {"cache_position": cache_position}
if alibi is None:
cache_kwargs.update({"sin": sin, "cos": cos})
key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs)
# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_layer = query_layer.transpose(1, 2)
key_layer = key_layer.transpose(1, 2)
value_layer = value_layer.transpose(1, 2)
if alibi is not None:
raise ValueError("`alibi` is not supported when `use_flash_attn` is True")
attn_dropout = self.config.attention_dropout if self.training else 0.0
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in float16 just to be sure everything works as expected.
input_dtype = query_layer.dtype
device_type = query_layer.device.type if query_layer.device.type != "mps" else "cpu"
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
# NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4
target_dtype = (
torch.get_autocast_dtype(device_type)
if hasattr(torch, "get_autocast_dtype")
else torch.get_autocast_gpu_dtype()
)
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.query_key_value.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to"
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
f" {target_dtype}."
)
query_layer = query_layer.to(target_dtype)
key_layer = key_layer.to(target_dtype)
value_layer = value_layer.to(target_dtype)
attn_output = _flash_attention_forward(
query_layer,
key_layer,
value_layer,
attention_mask,
query_length,
position_ids=position_ids,
dropout=attn_dropout,
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
)
attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
attn_output = self.dense(attn_weights)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
|
FalconFlashAttention2
|
python
|
python-openxml__python-docx
|
tests/image/test_png.py
|
{
"start": 12897,
"end": 13639
}
|
class ____:
def it_can_construct_from_a_stream_and_offset(self, from_offset_fixture):
stream_rdr, offset, px_width, px_height = from_offset_fixture
ihdr_chunk = _IHDRChunk.from_offset(None, stream_rdr, offset)
assert isinstance(ihdr_chunk, _IHDRChunk)
assert ihdr_chunk.px_width == px_width
assert ihdr_chunk.px_height == px_height
# fixtures -------------------------------------------------------
@pytest.fixture
def from_offset_fixture(self):
bytes_ = b"\x00\x00\x00\x2a\x00\x00\x00\x18"
stream_rdr = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
offset, px_width, px_height = 0, 42, 24
return stream_rdr, offset, px_width, px_height
|
Describe_IHDRChunk
|
python
|
pytorch__pytorch
|
torch/_inductor/autoheuristic/autoheuristic.py
|
{
"start": 1163,
"end": 1407
}
|
class ____(Exception):
"""
Exception that is thrown when AutoHeuristic tries to log data to a file where the metadata stored in the file does
not match the metadata it would store if the file didn't exist.
"""
|
InconsistentMetadata
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_taskinstance.py
|
{
"start": 4655,
"end": 5218
}
|
class ____:
task_id: str | None = None
dag_id: str | None = None
logical_date: datetime.datetime | None = None
task_state_in_callback: str | None = None
callback_ran = False
def wrap_task_instance(self, ti):
self.task_id = ti.task_id
self.dag_id = ti.dag_id
self.logical_date = ti.logical_date
self.task_state_in_callback = ""
self.callback_ran = False
def success_handler(self, context):
self.callback_ran = True
self.task_state_in_callback = context["ti"].state
|
CallbackWrapper
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py
|
{
"start": 120,
"end": 185
}
|
class ____(Enum):
A = 1
B = 2
C = 2 # PIE796
|
FakeEnum2
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/webmisc.py
|
{
"start": 37898,
"end": 39891
}
|
class ____(ExtendedRegexLexer):
"""
For Slim markup.
.. versionadded:: 2.0
"""
name = 'Slim'
aliases = ['slim']
filenames = ['*.slim']
mimetypes = ['text/x-slim']
flags = re.IGNORECASE
_dot = r'(?: \|\n(?=.* \|)|.)'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'([ \t]*==?)(.*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
(r'[ \t]+[\w:-]+(?==)', Name.Attribute, 'html-attributes'),
default('plain'),
],
'content': [
include('css'),
(r'[\w:-]+:[ \t]*\n', Text, 'plain'),
(r'(-)(.*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r'\|' + _dot + r'*\n', _starts_block(Text, 'plain'), '#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment.Preproc, 'slim-comment-block'), '#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
(r'[ \t]+\n', Punctuation, '#pop:2'),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(.*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'=', Punctuation),
(r'"[^"]+"', using(RubyLexer), 'tag'),
(r'\'[^\']+\'', using(RubyLexer), 'tag'),
(r'\w+', Text, 'tag'),
],
'slim-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
}
|
SlimLexer
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/system_config/objects.py
|
{
"start": 2582,
"end": 14003
}
|
class ____(
NamedTuple(
"_ResolvedRunConfig",
[
("ops", Mapping[str, OpConfig]),
("execution", "ExecutionConfig"),
("resources", Mapping[str, ResourceConfig]),
("loggers", Mapping[str, Mapping[str, object]]),
("original_config_dict", Any),
("inputs", Mapping[str, Any]),
],
)
):
def __new__(
cls,
ops: Optional[Mapping[str, OpConfig]] = None,
execution: Optional["ExecutionConfig"] = None,
resources: Optional[Mapping[str, ResourceConfig]] = None,
loggers: Optional[Mapping[str, Mapping[str, object]]] = None,
original_config_dict: Optional[Mapping[str, object]] = None,
inputs: Optional[Mapping[str, object]] = None,
):
check.opt_inst_param(execution, "execution", ExecutionConfig)
check.opt_mapping_param(original_config_dict, "original_config_dict")
resources = check.opt_mapping_param(resources, "resources", key_type=str)
inputs = check.opt_mapping_param(inputs, "inputs", key_type=str)
if execution is None:
execution = ExecutionConfig(None, None)
return super().__new__(
cls,
ops=check.opt_mapping_param(ops, "ops", key_type=str, value_type=OpConfig),
execution=execution,
resources=resources,
loggers=check.opt_mapping_param(loggers, "loggers", key_type=str, value_type=Mapping),
original_config_dict=original_config_dict,
inputs=inputs,
)
@staticmethod
def build(
job_def: JobDefinition,
run_config: Optional[Mapping[str, object]] = None,
) -> "ResolvedRunConfig":
"""This method validates a given run config against the pipeline config schema. If
successful, we instantiate an ResolvedRunConfig object.
In case the run_config is invalid, this method raises a DagsterInvalidConfigError
"""
from dagster._config import process_config
from dagster._core.system_config.composite_descent import composite_descent
check.inst_param(job_def, "job_def", JobDefinition)
run_config = check.opt_mapping_param(run_config, "run_config")
run_config_schema = job_def.run_config_schema
if run_config_schema.config_mapping:
# add user code boundary
with user_code_error_boundary(
DagsterConfigMappingFunctionError,
lambda: (
f"The config mapping function on job {job_def.name} has"
" thrown an unexpected error during its execution."
),
):
run_config = run_config_schema.config_mapping.resolve_from_unvalidated_config(
run_config
)
config_evr = process_config(
run_config_schema.run_config_schema_type, check.not_none(run_config)
)
if not config_evr.success:
raise DagsterInvalidConfigError(
f"Error in config for job {job_def.name}",
config_evr.errors,
run_config,
)
config_value = cast("dict[str, Any]", config_evr.value)
# If using the `execute_in_process` executor, we ignore the execution config value, since it
# may be pointing to the executor for the job rather than the `execute_in_process` executor.
if job_def.executor_def == execute_in_process_executor:
config_mapped_execution_configs: Optional[Mapping[str, Any]] = {}
else:
executor_config = config_value.get("execution", {})
config_mapped_execution_configs = config_map_executor(
executor_config, job_def.executor_def
)
resource_defs = job_def.get_required_resource_defs()
resource_configs = config_value.get("resources", {})
config_mapped_resource_configs = config_map_resources(resource_defs, resource_configs)
config_mapped_logger_configs = config_map_loggers(job_def, config_value)
op_config_dict = composite_descent(
job_def, config_value.get("ops", {}), job_def.resource_defs
)
input_configs = config_value.get("inputs", {})
return ResolvedRunConfig(
ops=op_config_dict,
execution=ExecutionConfig.from_dict(config_mapped_execution_configs),
loggers=config_mapped_logger_configs,
original_config_dict=run_config,
resources=config_mapped_resource_configs,
inputs=input_configs,
)
def to_dict(self) -> Mapping[str, Mapping[str, object]]:
env_dict: dict[str, Mapping[str, object]] = {}
op_configs: dict[str, object] = {}
for op_name, op_config in self.ops.items():
op_configs[op_name] = {
"config": op_config.config,
"inputs": op_config.inputs,
"outputs": op_config.outputs.config,
}
env_dict["ops"] = op_configs
env_dict["execution"] = (
{self.execution.execution_engine_name: self.execution.execution_engine_config}
if self.execution.execution_engine_name
else {}
)
env_dict["resources"] = {
resource_name: {"config": resource_config.config}
for resource_name, resource_config in self.resources.items()
}
env_dict["loggers"] = self.loggers
return env_dict
def config_map_executor(
executor_config: Mapping[str, Any],
executor_def: ExecutorDefinition,
) -> Mapping[str, object]:
executor_config_evr = executor_def.apply_config_mapping(executor_config)
if not executor_config_evr.success:
raise DagsterInvalidConfigError(
f"Invalid configuration provided for executor '{executor_def.name}'",
executor_config_evr.errors,
executor_config,
)
return {executor_def.name: executor_config_evr.value}
def config_map_resources(
resource_defs: Mapping[str, ResourceDefinition],
resource_configs: Mapping[str, object],
) -> Mapping[str, ResourceConfig]:
"""This function executes the config mappings for resources with respect to ConfigurableDefinition.
It iterates over resource_defs and looks up the corresponding config because resources need to
be mapped regardless of whether they receive config from run_config.
"""
config_mapped_resource_configs = {}
for resource_key, resource_def in resource_defs.items():
resource_config = resource_configs.get(resource_key, {})
resource_config_evr = resource_def.apply_config_mapping(resource_config)
if not resource_config_evr.success:
raise DagsterInvalidConfigError(
f"Error in config for resource {resource_key}",
resource_config_evr.errors,
resource_config,
)
else:
config_mapped_resource_configs[resource_key] = ResourceConfig.from_dict(
check.not_none(resource_config_evr.value)
)
return config_mapped_resource_configs
def config_map_loggers(
job_def: JobDefinition,
config_value: Mapping[str, Any],
) -> Mapping[str, Any]:
"""This function executes the config mappings for loggers with respect to ConfigurableDefinition.
It uses the `loggers` key on the run_config to determine which loggers will be initialized (and
thus which ones need config mapping) and then iterates over each, looking up the corresponding
LoggerDefinition in `mode_def.loggers`.
The following are the cases of run_config and loggers on mode_def that could emerge
Run Config Loggers on Mode Def Behavior Which Loggers Need Config Mapping?
------------------------------------- -------------------- -------------------------------------------------------------- -------------------------------------
{} or {'loggers': <dict or None>} [] default system loggers with default config all loggers on run config (empty set)
{} or {'loggers': <dict or None>} [custom_logger, ...] default system loggers with default config all loggers on run config (empty set)
{'loggers': {'custom_logger': <dict or None>}} [custom_logger, ...] use only the loggers listed in run_config all loggers on run config
{'loggers': {'console': <dict or None>}} [] use only the loggers listed in run_config (with default defs) all loggers on run config
The behavior of `run_config.loggers` as a source of truth for logger selection comes from:
python_modules/dagster/dagster/_core/execution/context_creation_pipeline.py#create_log_manager
See that codepath for more info on how the behavior in the above table is implemented. The logic
in that function is tightly coupled to this one and changes in either path should be confirmed
in the other.
"""
logger_configs = config_value.get("loggers", {})
config_mapped_logger_configs = {}
for logger_key, logger_config in logger_configs.items():
logger_def = job_def.loggers.get(logger_key)
if logger_def is None:
check.failed(f"No logger found for key {logger_key}")
logger_config_evr = logger_def.apply_config_mapping(logger_config)
if not logger_config_evr.success:
raise DagsterInvalidConfigError(
f"Error in config for logger {logger_key}",
logger_config_evr.errors,
logger_config,
)
else:
config_mapped_logger_configs[logger_key] = logger_config_evr.value
return config_mapped_logger_configs
def config_map_objects(
config_value: Any,
defs: Sequence[ExecutorDefinition],
keyed_by: str,
def_type: type,
name_of_def_type: str,
) -> Optional[Mapping[str, Any]]:
"""This function executes the config mappings for executors definitions with respect to
ConfigurableDefinition. It calls the ensure_single_item macro on the incoming config and then
applies config mapping to the result and the first executor_def with the same name on
the mode_def.
"""
config = config_value.get(keyed_by)
check.opt_mapping_param(config, "config", key_type=str)
if not config:
return None
obj_name, obj_config = ensure_single_item(config)
obj_def = next(
(defi for defi in defs if defi.name == obj_name), None
) # obj_defs are stored in a list and we want to find the def matching name
check.inst(
obj_def,
def_type,
f"Could not find a {def_type} definition on the selected mode that matches the "
f'{def_type} "{obj_name}" given in run config',
)
obj_def = cast("ConfigurableDefinition", obj_def)
obj_config_evr = obj_def.apply_config_mapping(obj_config)
if not obj_config_evr.success:
raise DagsterInvalidConfigError(
f'Invalid configuration provided for {name_of_def_type} "{obj_name}"',
obj_config_evr.errors,
obj_config,
)
return {obj_name: obj_config_evr.value}
|
ResolvedRunConfig
|
python
|
rapidsai__cudf
|
python/cudf/cudf/pandas/_benchmarks/pdsh.py
|
{
"start": 701,
"end": 14788
}
|
class ____:
"""PDS-H query definitions."""
name: str = "pdsh"
@staticmethod
def q0(run_config: RunConfig) -> pd.DataFrame:
"""Query 0."""
return pd.DataFrame()
@staticmethod
def q1(run_config: RunConfig) -> pd.DataFrame:
"""Query 1."""
line_item_ds = get_data(
run_config.dataset_path, "lineitem", run_config.suffix
)
var1 = date(1998, 9, 2)
filt = line_item_ds[line_item_ds["l_shipdate"] <= var1]
# This is lenient towards pandas as normally an optimizer should decide
# that this could be computed before the groupby aggregation.
# Other implementations don't enjoy this benefit.
filt["disc_price"] = filt.l_extendedprice * (1.0 - filt.l_discount)
filt["charge"] = (
filt.l_extendedprice * (1.0 - filt.l_discount) * (1.0 + filt.l_tax)
)
gb = filt.groupby(["l_returnflag", "l_linestatus"], as_index=False)
agg = gb.agg(
sum_qty=pd.NamedAgg(column="l_quantity", aggfunc="sum"),
sum_base_price=pd.NamedAgg(
column="l_extendedprice", aggfunc="sum"
),
sum_disc_price=pd.NamedAgg(column="disc_price", aggfunc="sum"),
sum_charge=pd.NamedAgg(column="charge", aggfunc="sum"),
avg_qty=pd.NamedAgg(column="l_quantity", aggfunc="mean"),
avg_price=pd.NamedAgg(column="l_extendedprice", aggfunc="mean"),
avg_disc=pd.NamedAgg(column="l_discount", aggfunc="mean"),
count_order=pd.NamedAgg(column="l_orderkey", aggfunc="size"),
)
return agg.sort_values(["l_returnflag", "l_linestatus"])
@staticmethod
def q2(run_config: RunConfig) -> pd.DataFrame:
"""Query 2."""
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
partsupp = get_data(
run_config.dataset_path, "partsupp", run_config.suffix
)
region = get_data(run_config.dataset_path, "region", run_config.suffix)
supplier = get_data(
run_config.dataset_path, "supplier", run_config.suffix
)
var1 = 15
var2 = "BRASS"
var3 = "EUROPE"
jn = (
part.merge(partsupp, left_on="p_partkey", right_on="ps_partkey")
.merge(supplier, left_on="ps_suppkey", right_on="s_suppkey")
.merge(nation, left_on="s_nationkey", right_on="n_nationkey")
.merge(region, left_on="n_regionkey", right_on="r_regionkey")
)
jn = jn[jn["p_size"] == var1]
jn = jn[jn["p_type"].str.endswith(var2)]
jn = jn[jn["r_name"] == var3]
gb = jn.groupby("p_partkey", as_index=False)
agg = gb["ps_supplycost"].min()
jn2 = agg.merge(jn, on=["p_partkey", "ps_supplycost"])
sel = jn2.loc[
:,
[
"s_acctbal",
"s_name",
"n_name",
"p_partkey",
"p_mfgr",
"s_address",
"s_phone",
"s_comment",
],
]
sort = sel.sort_values(
by=["s_acctbal", "n_name", "s_name", "p_partkey"],
ascending=[False, True, True, True],
)
return sort.head(100)
@staticmethod
def q3(run_config: RunConfig) -> pd.DataFrame:
"""Query 3."""
customer = get_data(
run_config.dataset_path, "customer", run_config.suffix
)
lineitem = get_data(
run_config.dataset_path, "lineitem", run_config.suffix
)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
var1 = "BUILDING"
var2 = date(1995, 3, 15)
fcustomer = customer[customer["c_mktsegment"] == var1]
jn1 = fcustomer.merge(
orders, left_on="c_custkey", right_on="o_custkey"
)
jn2 = jn1.merge(lineitem, left_on="o_orderkey", right_on="l_orderkey")
jn2 = jn2[jn2["o_orderdate"] < var2]
jn2 = jn2[jn2["l_shipdate"] > var2]
jn2["revenue"] = jn2.l_extendedprice * (1 - jn2.l_discount)
gb = jn2.groupby(
["o_orderkey", "o_orderdate", "o_shippriority"], as_index=False
)
agg = gb["revenue"].sum()
sel = agg.loc[
:, ["o_orderkey", "revenue", "o_orderdate", "o_shippriority"]
]
sel = sel.rename(columns={"o_orderkey": "l_orderkey"})
sorted_df = sel.sort_values(
by=["revenue", "o_orderdate"], ascending=[False, True]
)
return sorted_df.head(10)
@staticmethod
def q4(run_config: RunConfig) -> pd.DataFrame:
"""Query 4."""
lineitem = get_data(
run_config.dataset_path, "lineitem", run_config.suffix
)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
var1 = date(1993, 7, 1)
var2 = date(1993, 10, 1)
jn = lineitem.merge(
orders, left_on="l_orderkey", right_on="o_orderkey"
)
jn = jn[(jn["o_orderdate"] >= var1) & (jn["o_orderdate"] < var2)]
jn = jn[jn["l_commitdate"] < jn["l_receiptdate"]]
jn = jn.drop_duplicates(subset=["o_orderpriority", "l_orderkey"])
gb = jn.groupby("o_orderpriority", as_index=False)
agg = gb.agg(
order_count=pd.NamedAgg(column="o_orderkey", aggfunc="count")
)
return agg.sort_values(["o_orderpriority"])
@staticmethod
def q5(run_config: RunConfig) -> pd.DataFrame:
"""Query 5."""
path = run_config.dataset_path
suffix = run_config.suffix
customer = get_data(path, "customer", suffix)
lineitem = get_data(path, "lineitem", suffix)
nation = get_data(path, "nation", suffix)
orders = get_data(path, "orders", suffix)
region = get_data(path, "region", suffix)
supplier = get_data(path, "supplier", suffix)
var1 = "ASIA"
var2 = date(1994, 1, 1)
var3 = date(1995, 1, 1)
jn1 = region.merge(
nation, left_on="r_regionkey", right_on="n_regionkey"
)
jn2 = jn1.merge(
customer, left_on="n_nationkey", right_on="c_nationkey"
)
jn3 = jn2.merge(orders, left_on="c_custkey", right_on="o_custkey")
jn4 = jn3.merge(lineitem, left_on="o_orderkey", right_on="l_orderkey")
jn5 = jn4.merge(
supplier,
left_on=["l_suppkey", "n_nationkey"],
right_on=["s_suppkey", "s_nationkey"],
)
jn5 = jn5[jn5["r_name"] == var1]
jn5 = jn5[(jn5["o_orderdate"] >= var2) & (jn5["o_orderdate"] < var3)]
jn5["revenue"] = jn5.l_extendedprice * (1.0 - jn5.l_discount)
gb = jn5.groupby("n_name", as_index=False)["revenue"].sum()
return gb.sort_values("revenue", ascending=False)
@staticmethod
def q6(run_config: RunConfig) -> pd.DataFrame:
"""Query 6."""
path = run_config.dataset_path
suffix = run_config.suffix
lineitem = get_data(path, "lineitem", suffix)
var1 = date(1994, 1, 1)
var2 = date(1995, 1, 1)
var3 = 0.05
var4 = 0.07
var5 = 24
filt = lineitem[
(lineitem["l_shipdate"] >= var1) & (lineitem["l_shipdate"] < var2)
]
filt = filt[
(filt["l_discount"] >= var3) & (filt["l_discount"] <= var4)
]
filt = filt[filt["l_quantity"] < var5]
result_value = (filt["l_extendedprice"] * filt["l_discount"]).sum()
return pd.DataFrame({"revenue": [result_value]})
@staticmethod
def q7(run_config: RunConfig) -> pd.DataFrame:
"""Query 7."""
customer = get_data(
run_config.dataset_path, "customer", run_config.suffix
)
lineitem = get_data(
run_config.dataset_path, "lineitem", run_config.suffix
)
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
supplier = get_data(
run_config.dataset_path, "supplier", run_config.suffix
)
var1 = "FRANCE"
var2 = "GERMANY"
var3 = date(1995, 1, 1)
var4 = date(1996, 12, 31)
n1 = nation[(nation["n_name"] == var1)]
n2 = nation[(nation["n_name"] == var2)]
# Part 1
jn1 = customer.merge(n1, left_on="c_nationkey", right_on="n_nationkey")
jn2 = jn1.merge(orders, left_on="c_custkey", right_on="o_custkey")
jn2 = jn2.rename(columns={"n_name": "cust_nation"})
jn3 = jn2.merge(lineitem, left_on="o_orderkey", right_on="l_orderkey")
jn4 = jn3.merge(supplier, left_on="l_suppkey", right_on="s_suppkey")
jn5 = jn4.merge(n2, left_on="s_nationkey", right_on="n_nationkey")
df1 = jn5.rename(columns={"n_name": "supp_nation"})
# Part 2
jn1 = customer.merge(n2, left_on="c_nationkey", right_on="n_nationkey")
jn2 = jn1.merge(orders, left_on="c_custkey", right_on="o_custkey")
jn2 = jn2.rename(columns={"n_name": "cust_nation"})
jn3 = jn2.merge(lineitem, left_on="o_orderkey", right_on="l_orderkey")
jn4 = jn3.merge(supplier, left_on="l_suppkey", right_on="s_suppkey")
jn5 = jn4.merge(n1, left_on="s_nationkey", right_on="n_nationkey")
df2 = jn5.rename(columns={"n_name": "supp_nation"})
# Combine
total = pd.concat([df1, df2])
total = total[
(total["l_shipdate"] >= var3) & (total["l_shipdate"] <= var4)
]
total["volume"] = total["l_extendedprice"] * (
1.0 - total["l_discount"]
)
total["l_year"] = total["l_shipdate"].dt.year
gb = total.groupby(
["supp_nation", "cust_nation", "l_year"], as_index=False
)
agg = gb.agg(revenue=pd.NamedAgg(column="volume", aggfunc="sum"))
return agg.sort_values(by=["supp_nation", "cust_nation", "l_year"])
@staticmethod
def q8(run_config: RunConfig) -> pd.DataFrame:
"""Query 8."""
customer = get_data(
run_config.dataset_path, "customer", run_config.suffix
)
lineitem = get_data(
run_config.dataset_path, "lineitem", run_config.suffix
)
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
region = get_data(run_config.dataset_path, "region", run_config.suffix)
supplier = get_data(
run_config.dataset_path, "supplier", run_config.suffix
)
var1 = "BRAZIL"
var2 = "AMERICA"
var3 = "ECONOMY ANODIZED STEEL"
var4 = date(1995, 1, 1)
var5 = date(1996, 12, 31)
n1 = nation.loc[:, ["n_nationkey", "n_regionkey"]]
n2 = nation.loc[:, ["n_nationkey", "n_name"]]
jn1 = part.merge(lineitem, left_on="p_partkey", right_on="l_partkey")
jn2 = jn1.merge(supplier, left_on="l_suppkey", right_on="s_suppkey")
jn3 = jn2.merge(orders, left_on="l_orderkey", right_on="o_orderkey")
jn4 = jn3.merge(customer, left_on="o_custkey", right_on="c_custkey")
jn5 = jn4.merge(n1, left_on="c_nationkey", right_on="n_nationkey")
jn6 = jn5.merge(region, left_on="n_regionkey", right_on="r_regionkey")
jn6 = jn6[(jn6["r_name"] == var2)]
jn7 = jn6.merge(n2, left_on="s_nationkey", right_on="n_nationkey")
jn7 = jn7[(jn7["o_orderdate"] >= var4) & (jn7["o_orderdate"] <= var5)]
jn7 = jn7[jn7["p_type"] == var3]
jn7["o_year"] = jn7["o_orderdate"].dt.year
jn7["volume"] = jn7["l_extendedprice"] * (1.0 - jn7["l_discount"])
jn7 = jn7.rename(columns={"n_name": "nation"})
def udf(df: pd.DataFrame) -> float:
demonimator: float = df["volume"].sum()
df = df[df["nation"] == var1]
numerator: float = df["volume"].sum()
return round(numerator / demonimator, 2)
gb = jn7.groupby("o_year", as_index=False)
agg = gb.apply(udf, include_groups=False)
agg.columns = ["o_year", "mkt_share"]
return agg.sort_values("o_year")
@staticmethod
def q9(run_config: RunConfig) -> pd.DataFrame:
"""Query 9."""
path = run_config.dataset_path
suffix = run_config.suffix
lineitem = get_data(path, "lineitem", suffix)
nation = get_data(path, "nation", suffix)
orders = get_data(path, "orders", suffix)
part = get_data(path, "part", suffix)
partsupp = get_data(path, "partsupp", suffix)
supplier = get_data(path, "supplier", suffix)
jn1 = part.merge(partsupp, left_on="p_partkey", right_on="ps_partkey")
jn2 = jn1.merge(supplier, left_on="ps_suppkey", right_on="s_suppkey")
jn3 = jn2.merge(
lineitem,
left_on=["p_partkey", "ps_suppkey"],
right_on=["l_partkey", "l_suppkey"],
)
jn4 = jn3.merge(orders, left_on="l_orderkey", right_on="o_orderkey")
jn5 = jn4.merge(nation, left_on="s_nationkey", right_on="n_nationkey")
jn5 = jn5[jn5["p_name"].str.contains("green", regex=False)]
jn5["o_year"] = jn5["o_orderdate"].dt.year
jn5["amount"] = jn5["l_extendedprice"] * (1.0 - jn5["l_discount"]) - (
jn5["ps_supplycost"] * jn5["l_quantity"]
)
jn5 = jn5.rename(columns={"n_name": "nation"})
gb = jn5.groupby(["nation", "o_year"], as_index=False, sort=False)
agg = gb.agg(sum_profit=pd.NamedAgg(column="amount", aggfunc="sum"))
sorted_df = agg.sort_values(
by=["nation", "o_year"], ascending=[True, False]
)
return sorted_df.reset_index(drop=True)
if __name__ == "__main__":
run_pandas(PDSHQueries)
|
PDSHQueries
|
python
|
ray-project__ray
|
python/ray/tune/logger/tensorboardx.py
|
{
"start": 6176,
"end": 12347
}
|
class ____(LoggerCallback):
"""TensorBoardX Logger.
Note that hparams will be written only after a trial has terminated.
This logger automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
_SAVED_FILE_TEMPLATES = ["events.out.tfevents.*"]
VALID_HPARAMS = (str, bool, int, float, list, type(None))
VALID_NP_HPARAMS = (np.bool_, np.float32, np.float64, np.int32, np.int64)
def __init__(self):
try:
from tensorboardX import SummaryWriter
self._summary_writer_cls = SummaryWriter
except ImportError:
if log_once("tbx-install"):
logger.info('pip install "ray[tune]" to see TensorBoard files.')
raise
self._trial_writer: Dict["Trial", SummaryWriter] = {}
self._trial_result: Dict["Trial", Dict] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_writer:
self._trial_writer[trial].close()
trial.init_local_path()
self._trial_writer[trial] = self._summary_writer_cls(
trial.local_path, flush_secs=30
)
self._trial_result[trial] = {}
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_writer:
self.log_trial_start(trial)
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
valid_result = {}
for attr, value in flat_result.items():
full_attr = "/".join(path + [attr])
if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not np.isnan(value):
valid_result[full_attr] = value
self._trial_writer[trial].add_scalar(full_attr, value, global_step=step)
elif (isinstance(value, list) and len(value) > 0) or (
isinstance(value, np.ndarray) and value.size > 0
):
valid_result[full_attr] = value
# Must be a single image.
if isinstance(value, np.ndarray) and value.ndim == 3:
self._trial_writer[trial].add_image(
full_attr,
value,
global_step=step,
)
continue
# Must be a batch of images.
if isinstance(value, np.ndarray) and value.ndim == 4:
self._trial_writer[trial].add_images(
full_attr,
value,
global_step=step,
)
continue
# Must be video
if isinstance(value, np.ndarray) and value.ndim == 5:
self._trial_writer[trial].add_video(
full_attr, value, global_step=step, fps=20
)
continue
try:
self._trial_writer[trial].add_histogram(
full_attr, value, global_step=step
)
# In case TensorboardX still doesn't think it's a valid value
# (e.g. `[[]]`), warn and move on.
except (ValueError, TypeError):
if log_once("invalid_tbx_value"):
logger.warning(
"You are trying to log an invalid value ({}={}) "
"via {}!".format(full_attr, value, type(self).__name__)
)
self._trial_result[trial] = valid_result
self._trial_writer[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial in self._trial_writer:
if trial and trial.evaluated_params and self._trial_result[trial]:
flat_result = flatten_dict(self._trial_result[trial], delimiter="/")
scrubbed_result = {
k: value
for k, value in flat_result.items()
if isinstance(value, tuple(VALID_SUMMARY_TYPES))
}
self._try_log_hparams(trial, scrubbed_result)
self._trial_writer[trial].close()
del self._trial_writer[trial]
del self._trial_result[trial]
def _try_log_hparams(self, trial: "Trial", result: Dict):
# TBX currently errors if the hparams value is None.
flat_params = flatten_dict(trial.evaluated_params)
scrubbed_params = {
k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
}
np_params = {
k: v.tolist()
for k, v in flat_params.items()
if isinstance(v, self.VALID_NP_HPARAMS)
}
scrubbed_params.update(np_params)
removed = {
k: v
for k, v in flat_params.items()
if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
}
if removed:
logger.info(
"Removed the following hyperparameter values when "
"logging to tensorboard: %s",
str(removed),
)
from tensorboardX.summary import hparams
try:
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._trial_writer[trial].file_writer.add_summary(experiment_tag)
self._trial_writer[trial].file_writer.add_summary(session_start_tag)
self._trial_writer[trial].file_writer.add_summary(session_end_tag)
except Exception:
logger.exception(
"TensorboardX failed to log hparams. "
"This may be due to an unsupported type "
"in the hyperparameter values."
)
|
TBXLoggerCallback
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-flips-to-make-the-binary-string-alternating.py
|
{
"start": 29,
"end": 595
}
|
class ____(object):
def minFlips(self, s):
"""
:type s: str
:rtype: int
"""
result = float("inf")
cnt1 = cnt2 = 0
for i in xrange(2*len(s)-1 if len(s)%2 else len(s)):
if i >= len(s):
cnt1 -= int(s[i%len(s)])^((i-len(s))%2)^0
cnt2 -= int(s[i%len(s)])^((i-len(s))%2)^1
cnt1 += int(s[i%len(s)])^(i%2)^0
cnt2 += int(s[i%len(s)])^(i%2)^1
if i >= len(s)-1:
result = min(result, cnt1, cnt2)
return result
|
Solution
|
python
|
pallets__itsdangerous
|
tests/test_itsdangerous/test_timed.py
|
{
"start": 512,
"end": 758
}
|
class ____:
@pytest.fixture()
def ts(self):
return datetime(2011, 6, 24, 0, 9, 5, tzinfo=timezone.utc)
@pytest.fixture(autouse=True)
def freeze(self, ts):
with freeze_time(ts) as ft:
yield ft
|
FreezeMixin
|
python
|
jackfrued__Python-100-Days
|
Day31-35/code/example12.py
|
{
"start": 367,
"end": 459
}
|
class ____(Employee):
"""部门经理"""
def get_salary(self):
return 15000.0
|
Manager
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/conjecture/data.py
|
{
"start": 17050,
"end": 17184
}
|
class ____:
status: Status = Status.OVERRUN
def __repr__(self) -> str:
return "Overrun"
Overrun = _Overrun()
|
_Overrun
|
python
|
google__jax
|
tests/pallas/mosaic_gpu_test.py
|
{
"start": 5608,
"end": 5740
}
|
class ____(PallasTest, jtu.CudaArchSpecificTest):
def setUp(self):
self.skip_unless_sm90a()
super().setUp()
|
PallasSm90ATest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/tpu_strategy_test.py
|
{
"start": 8961,
"end": 41146
}
|
class ____(test.TestCase, parameterized.TestCase):
def test_handle_in_cross_replica_context(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(1.0)
@def_function.function
def func():
self.assertEndsWith(v.handle.device, "device:TPU:0")
return v + 1.0
ret = func()
self.assertAllEqual(ret, 2.0)
def test_save(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(1.0)
export_dir = self.create_tempdir()
save.save(v, export_dir)
reloaded_var = load.load(export_dir)
self.assertAllEqual(reloaded_var, 1.0)
def test_packed_variable_export(self, enable_packed_var):
if not enable_packed_var:
self.skipTest("Test for Packed Variables only.")
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
export_dir = self.get_temp_dir()
export_archive = TestExportArchive(1.0)
export_archive.save_function(export_dir)
restored_object = load.load(export_dir)
with ops.device("/tpu:0"):
self.assertAllEqual(restored_object._packed_var, 1.0)
def testStaticHashTableDatasetFnHostTrainingLoop(self, enable_packed_var):
self._dataset_fn_tracing_count = 0
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
vals = [0, 1, 2]
keys_tensor = constant_op.constant(
list(range(len(vals))), dtype=dtypes.int64)
vals_tensor = constant_op.constant(vals)
initializer = lookup_ops.KeyValueTensorInitializer(
keys_tensor, vals_tensor)
per_worker_table = lookup_ops.StaticHashTable(
initializer, default_value=-1)
@def_function.function
def dataset_fn(input_context):
tensor = constant_op.constant([0, 1, 3], dtype=dtypes.int64)
global_batch_size = 2
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = dataset_ops.Dataset.from_tensors(tensor).repeat().batch(
batch_size, drop_remainder=True)
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
dataset = dataset.prefetch(2) # This prefetches 2 batches per device.
dataset = dataset.map(per_worker_table.lookup)
self._dataset_fn_tracing_count += 1
return dataset
dist_iterator = iter(
strategy.experimental_distribute_datasets_from_function(dataset_fn))
@def_function.function
def step_fn(inputs):
# inputs should be [0, 1, -1]
return math_ops.reduce_sum(inputs)
def train_steps(iterator, steps):
for _ in math_ops.range(steps):
strategy.run(step_fn, args=(next(iterator),))
train_steps(dist_iterator, steps=5)
self.assertEqual(self._dataset_fn_tracing_count, 1)
def test_function_compile_with_xla(self, enable_packed_var):
if FLAGS.tpu_use_tfrt:
self.skipTest(
"This test triggers _XlaCompile and XlaLaunch which are not "
"supported in tfrt yet. We should avoid using these kernels on TPU. "
"However, it is a workaround to support b/129842431. We need more "
"discussion about how to support it in the long term.")
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(1.0)
@def_function.function
def func():
return v.read_value() + 1.0
with ops.device("/device:TPU:0"):
self.assertAllEqual(func(), 2.0)
def test_sequential_runs(self, enable_packed_var):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_cluster_resolver.initialize_tpu_system(resolver)
# Computation replicated to all cores.
device_assignment = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=2)
strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment)
strategy._enable_packed_variable_in_eager_mode = enable_packed_var
# Computation on the 1st core.
device_assignment2 = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=1)
strategy2 = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment2)
def computation(x):
return math_ops.square(x)
@def_function.function
def train_step():
outputs = strategy.experimental_local_results(
strategy.run(computation, args=([2., 2.],)))
outputs2 = strategy2.run(
computation, args=([outputs[0]],))
return outputs2
self.assertAllEqual([[16., 16.]], train_step())
def test_device_switch_case(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
a = variables.Variable(1)
inference_iteration = variables.Variable(-1)
def inference_fn(x, i):
return a + x + i
@def_function.function
def run_inference(x):
def do_inference(device, inference_fn, i):
with ops.device(device):
return inference_fn(x, i)
branch_fns = {
0: (lambda: do_inference("/device:TPU:0", inference_fn, 0)),
1: (lambda: do_inference("/device:TPU:1", inference_fn, 1)),
}
branch_index = inference_iteration.assign_add(1, use_locking=True) % 2
return control_flow_switch_case.switch_case(branch_index, branch_fns)
self.assertAllEqual(2., run_inference(1)) # Use TPU core 0.
self.assertAllEqual(3., run_inference(1)) # Use TPU core 1.
def test_recover_from_compilation_failures(self, enable_packed_var):
# TODO(b/148150981): Stop skipping this test once recovery works
# for non-local TPU.
if FLAGS.tpu:
self.skipTest("Recovery fails for non-local TPU, see b/148150981")
# Disable automatic outside compilation.
config.set_soft_device_placement(False)
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def compilation_failure_run():
def computation():
return random_ops.random_gamma([10], [0.5, 1.5])
return strategy.run(computation)
with self.assertRaises(errors.OpError):
compilation_failure_run()
@def_function.function
def good_run():
def computation():
return random_ops.random_normal([10])
return strategy.run(computation)
good_run()
def test_dynamic_shape_with_outside_compilation_failure(
self, enable_packed_var):
# Enable automatic outside compilation.
config.set_soft_device_placement(True)
strategy = get_tpu_strategy(enable_packed_var)
dataset = dataset_ops.Dataset.from_tensors(("string", 1.0)).repeat().batch(
2, drop_remainder=False)
dataset = strategy.experimental_distribute_dataset(dataset)
iterator = iter(dataset)
@def_function.function
def train_fn(iterator):
def step_fn(inputs):
input0, input1 = inputs
return array_ops.size(input0), math_ops.reduce_sum(input1)
return strategy.experimental_local_results(
strategy.run(step_fn, args=(next(iterator),)))
with self.assertRaises(errors.InvalidArgumentError):
logging.info(train_fn(iterator))
def test_computation_on_subset_cores(self, enable_packed_var):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_cluster_resolver.initialize_tpu_system(resolver)
all_core_strategy = tpu_lib.TPUStrategyV2(resolver)
all_core_strategy._enable_packed_variable_in_eager_mode = enable_packed_var
with all_core_strategy.scope():
v = variables.Variable(0.0,
aggregation=variables.VariableAggregation.MEAN)
# Computation on the 1st core.
device_assignment = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=1)
first_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment)
first_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
# Computation on the 2nd core.
device_assignment2 = device_assignment_lib.DeviceAssignment(
topology, [[[0, 0, 0, 1]]])
second_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment2)
second_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
@def_function.function
def train_step():
def step_fn():
return v + 1.0
all_core_strategy.run(step_fn)
r1 = first_core_strategy.run(step_fn)
r2 = second_core_strategy.run(step_fn)
return r1 + r2
train_step()
self.assertAllEqual(2., train_step())
def test_worker_devices_on_subset_cores(self, enable_packed_var):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_cluster_resolver.initialize_tpu_system(resolver)
# Strategy for the 1st core.
device_assignment = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=1)
first_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment)
first_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
# Strategy for the 2nd core.
device_assignment2 = device_assignment_lib.DeviceAssignment(
topology, [[[0, 0, 0, 1]]])
second_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment2)
second_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
self.assertLen(first_core_strategy.extended.worker_devices, 1)
self.assertEndsWith(first_core_strategy.extended.worker_devices[0],
"device:TPU:0")
self.assertLen(second_core_strategy.extended.worker_devices, 1)
self.assertEndsWith(second_core_strategy.extended.worker_devices[0],
"device:TPU:1")
def test_control_output_in_while_body_fn(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(
0.0, aggregation=variables.VariableAggregation.MEAN)
@def_function.function
def train_step():
def step_fn():
v.assign_add(1)
for _ in math_ops.range(2):
strategy.run(step_fn)
train_step()
self.assertEqual(2.0, v.numpy())
def test_cluster_conditional_with_dynamic_shape(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def shape_list(tensor):
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dynamic_shape = array_ops.shape(input=tensor)
for index in non_static_indexes:
shape[index] = dynamic_shape[index]
return shape
def step_fn(condition):
where = array_ops.where(condition)
if array_ops.shape(where)[0] > 0:
tensor_shape = shape_list(where)
d1 = tensor_shape[0]
d2 = tensor_shape[1]
where = array_ops.reshape(where, [d1, d2])
return where
return strategy.run(step_fn, args=([True, False, True],))
outputs = strategy.experimental_local_results(train_step())
self.assertAllEqual(outputs[0].numpy(), [[0], [2]])
def test_cluster_in_graph_and_while_body_fn(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def step_fn(prev):
s = prev + 1
return s
def init_fn():
return array_ops.zeros(shape=())
prev = strategy.run(init_fn)
for _ in math_ops.range(10):
prev = strategy.run(step_fn, args=(prev,))
return strategy.reduce(reduce_util.ReduceOp.SUM, prev, axis=None)
sum_val = train_step().numpy().astype(float)
self.assertEqual(sum_val, strategy.num_replicas_in_sync * 10)
def test_two_clusters_with_same_fn(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def foo(x):
return strategy.run(lambda x: x + 1, (x,))
@def_function.function
def bar(x):
foo(x)
return foo(x)
bar(1)
def test_tpu_variable_run_argument(self, enable_packed_var):
# TPUStrategy.run() casts inputs to Tensor, but has logic to preserve
# variables to avoid unintuitive errors.
# Here we test that a TPUDistributedVariable passed to TPUStrategy.run()
# remains a variable.
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
tpu_variable = variables.Variable(1)
def replica_step(first_arg, variable):
del first_arg # Just here to make sure we're not relying on arg position.
if variable is not None:
self.assertIsInstance(variable, tpu_values.TPUDistributedVariable)
@def_function.function
def step():
strategy.run(
replica_step, args=(
2,
tpu_variable,
))
step()
def test_tpu_run_arg_parsing(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
tpu_vars = [variables.Variable(1)]
def only_star_args(*args):
del args
def pos_and_star_args(first_arg, *args):
del first_arg
del args
def named_args(first_arg, second_arg):
del first_arg
del second_arg
def star_args_and_kw_only(*args, kw):
del args
del kw
# pylint:disable=function-redefined
@def_function.function
def step():
strategy.run(only_star_args, args=(2,))
step()
@def_function.function
def step():
strategy.run(named_args, kwargs={"first_arg": 2, "second_arg": 3})
step()
with self.assertRaisesRegex(TypeError, r"got multiple values for argument"):
@def_function.function
def step():
strategy.run(
named_args, args=(1,), kwargs={
"first_arg": 2,
"second_arg": 3
})
step()
with self.assertRaisesRegex(ValueError,
r"cannot handle Variables passed to \*args"):
@def_function.function
def step():
strategy.run(
only_star_args, args=(
2,
tpu_vars,
))
step()
@def_function.function
def step():
strategy.run(pos_and_star_args, args=(2, 3, 4))
step()
@def_function.function
def step():
strategy.run(star_args_and_kw_only, args=(2, 3), kwargs={"kw": tpu_vars})
step()
with self.assertRaisesRegex(ValueError,
r"mix of positional args and \*args"):
@def_function.function
def step():
strategy.run(pos_and_star_args, args=(tpu_vars, 3, 4))
step()
with self.assertRaisesRegex(ValueError, r"Too many positional arguments"):
@def_function.function
def step():
strategy.run(named_args, args=(2, 3, 4))
step()
class DummyClass:
@def_function.function
def method(self, arg_1):
del arg_1
def step(self):
strategy.run(self.method, args=(tpu_vars,))
DummyClass().step()
# pylint:enable=function-redefined
def test_using_external_variable_inside_tf_function(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
v = variables.Variable(2.0)
@def_function.function
def train_step(data):
def computation(inputs):
return inputs + v
return strategy.run(computation, args=(data,))
expected_result = [[x + 2.] for x in range(0, strategy.num_replicas_in_sync)
]
self.assertAllEqual(
expected_result,
strategy.experimental_local_results(train_step(next(input_iterator))))
# TODO(b/145574622): Remove this test once it is re-enabled in values_test.py.
def test_all_reduce_on_sync_on_read_variable(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync, output_type=dtypes.float32).batch(
strategy.num_replicas_in_sync, drop_remainder=True)
input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
with strategy.scope():
w = variables.Variable(
(0.,),
shape=(1,),
trainable=False,
synchronization=variables.VariableSynchronization.ON_READ,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
self.assertFalse(w._is_mirrored())
@def_function.function
def run(iterator):
def computation(x):
w.assign(x + w)
return w
def all_reduce(x):
ctx = distribute_lib.get_replica_context()
return ctx.all_reduce("SUM", w) + x
outputs = strategy.run(computation, args=(next(iterator),))
outputs2 = strategy.experimental_local_results(
strategy.run(all_reduce, args=(outputs,)))
return outputs2
data = range(0, strategy.num_replicas_in_sync)
data_sum = sum(data)
expected_result = [
[x + data_sum] for x in range(0, strategy.num_replicas_in_sync)
]
self.assertAllEqual(expected_result, run(input_iterator))
self.assertAllEqual((0.,), w.read_value())
def test_run_output_on_device(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
def computation(x):
return math_ops.square(x)
@def_function.function
def train_step():
outputs = strategy.experimental_local_results(
strategy.run(computation, args=(2,)))
return outputs
results = train_step()
self.assertAllEqual([4., 4.], results)
self.assertAllEqual("/job:localhost/replica:0/task:0/device:TPU:0",
results[0].backing_device)
self.assertAllEqual("/job:localhost/replica:0/task:0/device:TPU:1",
results[1].backing_device)
def test_run_passing_and_returning_nones(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def computation(x):
return x
# Note that this input None is nested.
outputs = strategy.experimental_local_results(
strategy.run(computation, args=([1, [2, None]],)))
return outputs
results = train_step()
self.assertAllEqual(1, results[0][0])
self.assertAllEqual(2, results[0][1][0])
self.assertIsNone(results[0][1][1])
def test_run_passing_and_returning_empty_list(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def computation(x):
return x
outputs = strategy.experimental_local_results(
strategy.run(computation, args=([],)))
return outputs
self.assertEqual([], train_step()[0])
def test_run_passing_and_returning_empty_dict(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def computation(x):
return x
outputs = strategy.experimental_local_results(
strategy.run(computation, args=({},)))
return outputs
self.assertEqual({}, train_step()[0])
def test_composite_input_output(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
if strategy.num_replicas_in_sync != 2:
self.skipTest("Test assumes two replicas.")
with strategy.scope():
table = variables.Variable(
initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32)
@def_function.function
def sparse_lookup(iterator):
def tpu_function(sparse):
# Assumes dense_shape is (2, *)
looked_up = array_ops.gather(table, sparse.values)
segment_sum = math_ops.unsorted_segment_sum(
looked_up, sparse.indices[:, 0], 2)
return sparse, segment_sum
return nest.map_structure(
strategy.experimental_local_results,
strategy.run(tpu_function, args=(next(iterator),)))
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(2)
def make_sparse(_):
return sparse_tensor.SparseTensor(
indices=array_ops.constant([[0, 0], [1, 0], [1, 1]],
dtype=dtypes.int64),
values=array_ops.constant([0, 0, 1], dtype=dtypes.int32),
dense_shape=array_ops.constant([2, 2], dtype=dtypes.int64))
return dataset.map(make_sparse)
dataset = iter(
strategy.distribute_datasets_from_function(
dataset_fn,
distribute_lib.InputOptions(experimental_fetch_to_device=False)))
sparse, result = sparse_lookup(dataset)
# All replicas return identical results.
for replica in range(strategy.num_replicas_in_sync):
self.assertIsInstance(sparse[replica], sparse_tensor.SparseTensor)
self.assertAllEqual(sparse[replica].indices, [[0, 0], [1, 0], [1, 1]])
self.assertAllEqual(sparse[replica].values, [0, 0, 1])
self.assertAllEqual(sparse[replica].dense_shape, [2, 2])
self.assertAllEqual(result[replica], [[0.0, 1.0], [3.0, 8.0]])
def test_composite_input_non_flat_output(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
if strategy.num_replicas_in_sync != 2:
self.skipTest("Test assumes two replicas.")
with strategy.scope():
table = variables.Variable(
initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32)
@def_function.function
def sparse_lookup(iterator):
def tpu_function(sparse):
# Assumes dense_shape is (2, *)
looked_up = array_ops.gather(table, sparse.values)
segment_sum = math_ops.unsorted_segment_sum(
looked_up, sparse.indices[:, 0], 2)
return {"sparse": sparse, "segment_sum": segment_sum}
return nest.map_structure(
strategy.experimental_local_results,
strategy.run(tpu_function, args=(next(iterator),)))
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(2)
def make_sparse(_):
return sparse_tensor.SparseTensor(
indices=array_ops.constant([[0, 0], [1, 0], [1, 1]],
dtype=dtypes.int64),
values=array_ops.constant([0, 0, 1], dtype=dtypes.int32),
dense_shape=array_ops.constant([2, 2], dtype=dtypes.int64))
return dataset.map(make_sparse)
dataset = iter(
strategy.distribute_datasets_from_function(
dataset_fn,
distribute_lib.InputOptions(experimental_fetch_to_device=False)))
output = sparse_lookup(dataset)
# All replicas return identical results.
for replica in range(strategy.num_replicas_in_sync):
self.assertIsInstance(output["sparse"][replica],
sparse_tensor.SparseTensor)
self.assertAllEqual(output["sparse"][replica].indices,
[[0, 0], [1, 0], [1, 1]])
self.assertAllEqual(output["sparse"][replica].values, [0, 0, 1])
self.assertAllEqual(output["sparse"][replica].dense_shape, [2, 2])
self.assertAllEqual(output["segment_sum"][replica],
[[0.0, 1.0], [3.0, 8.0]])
def test_composite_input_dynamic_shapes_outside_compilation(
self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
if strategy.num_replicas_in_sync != 2:
self.skipTest("Test assumes two replicas.")
table = variables.Variable(
initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32)
@def_function.function
def sparse_lookup(iterator):
def tpu_function(sparse):
lookup = tpu_replication.outside_compilation(
embedding_ops.safe_embedding_lookup_sparse, table, sparse)
return math_ops.reduce_sum(lookup, axis=0)
return strategy.experimental_local_results(
strategy.run(tpu_function, args=(next(iterator),)))
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(2)
def make_sparse(i):
indices = array_ops.constant([[0, 0], [1, 0], [1, 1]],
dtype=dtypes.int64)[0:2 + i]
values = array_ops.constant([0, 0, 1], dtype=dtypes.int32)[0:2 + i]
shape = [
array_ops.constant([2], dtype=dtypes.int64),
array_ops.expand_dims(1 + i, axis=0)
]
dense_shape = array_ops.concat(shape, axis=0)
return sparse_tensor.SparseTensor(
indices=indices, values=values, dense_shape=dense_shape)
return dataset.map(make_sparse)
dataset = iter(
strategy.distribute_datasets_from_function(
dataset_fn,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
result = sparse_lookup(dataset)
self.assertAllEqual(result, [[0.0, 2.0], [1.5, 5.0]])
def test_composite_input_with_non_flat_components(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
class TestCompositeTypeSpec(type_spec.TypeSpec):
def __init__(self, component_type_spec):
self._component_type_spec = component_type_spec
@property
def value_type(self):
return TestComposite
def _to_components(self, value):
return value.values
def _from_components(self, components):
return TestComposite(components[0], components[1][0], components[1][1])
@property
def _component_specs(self):
return [self._component_type_spec,
[self._component_type_spec, self._component_type_spec]]
def _serialize(self):
return (self._component_type_spec,)
class TestComposite(composite_tensor.CompositeTensor):
def __init__(self, value1, value2, value3):
self.values = [value1, [value2, value3]]
@property
def _type_spec(self):
return TestCompositeTypeSpec(
tensor_spec.TensorSpec.from_tensor(self.values[0]))
def _shape_invariant_to_type_spec(self, shape):
return [shape, [shape, shape]]
@def_function.function
def test_fn(test_composite):
def tpu_function(composite):
return (composite,
composite.values[0] + (
composite.values[1][0] + composite.values[1][1])/2)
return nest.map_structure(
strategy.experimental_local_results,
strategy.run(tpu_function, args=(test_composite,)))
a = array_ops.constant([0.1])
b = array_ops.constant([1.2])
c = array_ops.constant([-0.4])
test_composite = TestComposite(a, b, c)
composite, result = test_fn(test_composite)
# All replicas return identical results.
for replica in range(strategy.num_replicas_in_sync):
self.assertIsInstance(composite[replica], TestComposite)
self.assertAllEqual(composite[replica].values[0], a)
self.assertAllEqual(composite[replica].values[1][0], b)
self.assertAllEqual(composite[replica].values[1][1], c)
self.assertAllEqual(result[replica], array_ops.constant([0.50000006]))
def test_per_device_tracing_of_mirrored_variables(self, enable_packed_var):
# Define trace_count as a list to avoid python scoping error
trace_count = [0]
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
variable = variables.Variable(0.0)
@def_function.function
def add_one():
trace_count[0] = trace_count[0] + 1
return math_ops.add(variable, constant_op.constant(1.0))
@def_function.function
def update_variable():
for device in set(strategy.extended.worker_devices):
with ops.device(device):
add_one()
with strategy.scope():
update_variable.get_concrete_function()
self.assertLen(strategy.extended.worker_devices, trace_count[0])
def test_tpu_cancellation_does_not_close_chips(self, enable_packed_var):
if tpu_lib.enable_batch_variable_initialization():
self.skipTest("b/271767559")
if not FLAGS.tpu_use_tfrt:
self.skipTest(
"`tpu_cancellation_closes_chip only applies to TFRT TPU Runtime.")
strategy = get_tpu_strategy(enable_packed_var)
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
x = random_ops.random_normal((10240, 10240))
y = random_ops.random_normal((10240, 10240))
v = variables.Variable(array_ops.identity(x))
dist_dataset = strategy.experimental_distribute_dataset(
dataset_ops.Dataset.from_tensors(y).repeat(num_replicas).batch(
num_replicas))
dist_iterator = iter(dist_dataset)
@def_function.function
def train_steps(v, iterator, steps):
def step_fn(inputs):
for val in inputs:
v.assign(math_ops.matmul(v, val))
for _ in math_ops.range(steps):
strategy.run(step_fn, args=(next(iterator),))
with self.assertRaises(errors.OutOfRangeError):
# The iterator has num_replicas/num_replicas = 1 step only.
train_steps(v, dist_iterator, 2)
# If TPU chips are not closed we can run the function on TPU again.
w = variables.Variable(array_ops.identity(x))
dist_dataset = strategy.experimental_distribute_dataset(
dataset_ops.Dataset.from_tensors(y).repeat(num_replicas).batch(
num_replicas))
dist_iterator = iter(dist_dataset)
train_steps(w, dist_iterator, 1)
def test_tpu_hardware_feature(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
self.assertIsInstance(
strategy.extended.tpu_hardware_feature.embedding_feature,
tpu_hardware_feature.HardwareFeature.EmbeddingFeature)
def test_get_tpu_cluster_resolver(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
self.assertIsNotNone(strategy.cluster_resolver)
def test_replica_order_for_distribute_datasets_from_function(
self, enable_packed_var
):
def _create_dataset(strategy):
def dataset_fn(ctx):
del ctx
return dataset_ops.Dataset.range(2)
return strategy.distribute_datasets_from_function(dataset_fn)
values = self._test_replica_order(_create_dataset).values
self.assertLen(values, 2)
self.assertEqual(1, values[0].numpy())
self.assertEqual(0, values[1].numpy())
def test_replica_order_for_experimental_distribute_dataset(
self, enable_packed_var
):
def _create_dataset(strategy):
dataset = dataset_ops.Dataset.range(2).batch(2)
return strategy.experimental_distribute_dataset(dataset)
values = self._test_replica_order(_create_dataset).values
self.assertLen(values, 2)
self.assertEqual(1, values[0].numpy())
self.assertEqual(0, values[1].numpy())
def _test_replica_order(self, create_dist_dataset_fn):
tf2.enable()
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_cluster_resolver.initialize_tpu_system(resolver)
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=[[[0, 0, 0, 1]], [[0, 0, 0, 0]]]
)
strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment
)
strategy.extended._enable_data_reorder = True
dist_dataset = create_dist_dataset_fn(strategy)
iterator = iter(dist_dataset)
@def_function.function
def test_iterators_order(iterator):
return next(iterator)
return test_iterators_order(iterator)
@test_util.with_eager_op_as_function
|
TPUStrategyTest
|
python
|
tornadoweb__tornado
|
tornado/test/simple_httpclient_test.py
|
{
"start": 27875,
"end": 28870
}
|
class ____(AsyncHTTPTestCase):
def setUp(self):
self.cleanup_event = Event()
test = self
# Dummy Resolver subclass that never finishes.
class BadResolver(Resolver):
@gen.coroutine
def resolve(self, *args, **kwargs):
yield test.cleanup_event.wait()
# Return something valid so the test doesn't raise during cleanup.
return [(socket.AF_INET, ("127.0.0.1", test.get_http_port()))]
super().setUp()
self.http_client = SimpleAsyncHTTPClient(resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler)])
def test_resolve_timeout(self):
with self.assertRaises(HTTPTimeoutError):
self.fetch("/hello", connect_timeout=0.1, raise_error=True)
# Let the hanging coroutine clean up after itself
self.cleanup_event.set()
self.io_loop.run_sync(lambda: gen.sleep(0))
|
ResolveTimeoutTestCase
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/vision.py
|
{
"start": 4376,
"end": 25913
}
|
class ____(GoogleBaseHook):
"""
Hook for Google Cloud Vision APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_client: ProductSearchClient | None
product_name_determiner = NameDeterminer("Product", "product_id", ProductSearchClient.product_path)
product_set_name_determiner = NameDeterminer(
"ProductSet", "productset_id", ProductSearchClient.product_set_path
)
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._client = None
def get_conn(self) -> ProductSearchClient:
"""
Retrieve a connection to Cloud Vision.
:return: Google Cloud Vision client object.
"""
if not self._client:
self._client = ProductSearchClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@cached_property
def annotator_client(self) -> ImageAnnotatorClient:
"""
Creates ImageAnnotatorClient.
:return: Google Image Annotator client object.
"""
return ImageAnnotatorClient(credentials=self.get_credentials())
@staticmethod
def _check_for_error(response: dict) -> None:
if "error" in response:
raise AirflowException(response)
@GoogleBaseHook.fallback_to_default_project_id
def create_product_set(
self,
location: str,
product_set: ProductSet | None,
project_id: str = PROVIDE_PROJECT_ID,
product_set_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Create product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator`.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Creating a new ProductSet under the parent: %s", parent)
response = client.create_product_set(
parent=parent,
product_set=product_set,
product_set_id=product_set_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ProductSet created: %s", response.name if response else "")
self.log.debug("ProductSet created:\n%s", response)
if not product_set_id:
# Product set id was generated by the API
product_set_id = self._get_autogenerated_id(response)
self.log.info("Extracted autogenerated ProductSet ID from the response: %s", product_set_id)
return product_set_id
@GoogleBaseHook.fallback_to_default_project_id
def get_product_set(
self,
location: str,
product_set_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Get product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionGetProductSetOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Retrieving ProductSet: %s", name)
response = client.get_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("ProductSet retrieved.")
self.log.debug("ProductSet retrieved:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def update_product_set(
self,
product_set: dict | ProductSet,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
product_set_id: str | None = None,
update_mask: dict | field_mask_pb2.FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Update product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductSetOperator`.
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
if isinstance(product_set, dict):
product_set = ProductSet(product_set)
self.log.info("Updating ProductSet: %s", product_set.name)
response = client.update_product_set(
product_set=product_set,
update_mask=update_mask, # type: ignore
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ProductSet updated: %s", response.name if response else "")
self.log.debug("ProductSet updated:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def delete_product_set(
self,
location: str,
product_set_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductSetOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Deleting ProductSet: %s", name)
client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("ProductSet with the name [%s] deleted.", name)
@GoogleBaseHook.fallback_to_default_project_id
def create_product(
self,
location: str,
product: dict | Product,
project_id: str = PROVIDE_PROJECT_ID,
product_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Create product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductOperator`.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Creating a new Product under the parent: %s", parent)
if isinstance(product, dict):
product = Product(product)
response = client.create_product(
parent=parent,
product=product,
product_id=product_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Product created: %s", response.name if response else "")
self.log.debug("Product created:\n%s", response)
if not product_id:
# Product id was generated by the API
product_id = self._get_autogenerated_id(response)
self.log.info("Extracted autogenerated Product ID from the response: %s", product_id)
return product_id
@GoogleBaseHook.fallback_to_default_project_id
def get_product(
self,
location: str,
product_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Get product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionGetProductOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info("Retrieving Product: %s", name)
response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("Product retrieved.")
self.log.debug("Product retrieved:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def update_product(
self,
product: dict | Product,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
product_id: str | None = None,
update_mask: dict | field_mask_pb2.FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Update product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductOperator`.
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
if isinstance(product, dict):
product = Product(product)
self.log.info("Updating ProductSet: %s", product.name)
response = client.update_product(
product=product,
update_mask=update_mask, # type: ignore
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Product updated: %s", response.name if response else "")
self.log.debug("Product updated:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def delete_product(
self,
location: str,
product_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info("Deleting ProductSet: %s", name)
client.delete_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("Product with the name [%s] deleted:", name)
@GoogleBaseHook.fallback_to_default_project_id
def create_reference_image(
self,
location: str,
product_id: str,
reference_image: dict | ReferenceImage,
project_id: str,
reference_image_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Create reference image.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateReferenceImageOperator`.
"""
client = self.get_conn()
self.log.info("Creating ReferenceImage")
parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id)
if isinstance(reference_image, dict):
reference_image = ReferenceImage(reference_image)
response = client.create_reference_image(
parent=parent,
reference_image=reference_image,
reference_image_id=reference_image_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ReferenceImage created: %s", response.name if response else "")
self.log.debug("ReferenceImage created:\n%s", response)
if not reference_image_id:
# Reference image id was generated by the API
reference_image_id = self._get_autogenerated_id(response)
self.log.info(
"Extracted autogenerated ReferenceImage ID from the response: %s", reference_image_id
)
return reference_image_id
@GoogleBaseHook.fallback_to_default_project_id
def delete_reference_image(
self,
location: str,
product_id: str,
reference_image_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete reference image.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteReferenceImageOperator`.
"""
client = self.get_conn()
self.log.info("Deleting ReferenceImage")
name = ProductSearchClient.reference_image_path(
project=project_id, location=location, product=product_id, reference_image=reference_image_id
)
client.delete_reference_image(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ReferenceImage with the name [%s] deleted.", name)
@GoogleBaseHook.fallback_to_default_project_id
def add_product_to_product_set(
self,
product_set_id: str,
product_id: str,
project_id: str,
location: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Add product to product set.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionAddProductToProductSetOperator`.
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Add Product[name=%s] to Product Set[name=%s]", product_name, product_set_name)
client.add_product_to_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Product added to Product Set")
@GoogleBaseHook.fallback_to_default_project_id
def remove_product_from_product_set(
self,
product_set_id: str,
product_id: str,
project_id: str,
location: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Remove product from product set.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionRemoveProductFromProductSetOperator`.
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Remove Product[name=%s] from Product Set[name=%s]", product_name, product_set_name)
client.remove_product_from_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Product removed from Product Set")
def annotate_image(
self,
request: dict | AnnotateImageRequest,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> dict:
"""
Annotate image.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator`.
"""
client = self.annotator_client
self.log.info("Annotating image")
response = client.annotate_image(request=request, retry=retry, timeout=timeout)
self.log.info("Image annotated")
return MessageToDict(response._pb)
@GoogleBaseHook.quota_retry()
def batch_annotate_images(
self,
requests: list[dict] | list[AnnotateImageRequest],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> dict:
"""
Batch annotate images.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator`.
"""
client = self.annotator_client
self.log.info("Annotating images")
requests = list(map(AnnotateImageRequest, requests))
response = client.batch_annotate_images(requests=requests, retry=retry, timeout=timeout)
self.log.info("Images annotated")
return MessageToDict(response._pb)
@GoogleBaseHook.quota_retry()
def text_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Text detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectTextOperator`.
"""
client = self.annotator_client
self.log.info("Detecting text")
if additional_properties is None:
additional_properties = {}
response = client.text_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Text detection finished")
return response
@GoogleBaseHook.quota_retry()
def document_text_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Document text detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionTextDetectOperator`.
"""
client = self.annotator_client
self.log.info("Detecting document text")
if additional_properties is None:
additional_properties = {}
response = client.document_text_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Document text detection finished")
return response
@GoogleBaseHook.quota_retry()
def label_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Label detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageLabelsOperator`.
"""
client = self.annotator_client
self.log.info("Detecting labels")
if additional_properties is None:
additional_properties = {}
response = client.label_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Labels detection finished")
return response
@GoogleBaseHook.quota_retry()
def safe_search_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Safe search detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageSafeSearchOperator`.
"""
client = self.annotator_client
self.log.info("Detecting safe search")
if additional_properties is None:
additional_properties = {}
response = client.safe_search_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Safe search detection finished")
return response
@staticmethod
def _get_autogenerated_id(response) -> str:
try:
name = response.name
except AttributeError as e:
raise AirflowException(f"Unable to get name from response... [{response}]\n{e}")
if "/" not in name:
raise AirflowException(f"Unable to get id from name... [{name}]")
return name.rsplit("/", 1)[1]
|
CloudVisionHook
|
python
|
numba__numba
|
numba/tests/test_function_type.py
|
{
"start": 2392,
"end": 17045
}
|
class ____(TestCase):
"""Test first-class functions in the context of a Numba jit compiled
function.
"""
def test_in__(self):
"""Function is passed in as an argument.
"""
def a(i):
return i + 1
def foo(f):
return 0
sig = int64(int64)
for decor in [mk_cfunc_func(sig),
njit_func,
mk_njit_with_sig_func(sig),
mk_ctypes_func(sig),
mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__, jit=jit_opts):
a_ = decor(a)
self.assertEqual(jit_(foo)(a_), foo(a))
def test_in_call__(self):
"""Function is passed in as an argument and called.
Also test different return values.
"""
def a_i64(i):
return i + 1234567
def a_f64(i):
return i + 1.5
def a_str(i):
return "abc"
def foo(f):
return f(123)
for f, sig in [(a_i64, int64(int64)), (a_f64, float64(int64))]:
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig),
mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(
sig=sig, decor=decor.__name__, jit=jit_opts):
f_ = decor(f)
self.assertEqual(jit_(foo)(f_), foo(f))
def test_in_call_out(self):
"""Function is passed in as an argument, called, and returned.
"""
def a(i):
return i + 1
def foo(f):
f(123)
return f
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
r1 = jit_(foo)(a_).pyfunc
r2 = foo(a)
self.assertEqual(r1, r2)
def test_in_seq_call(self):
"""Functions are passed in as arguments, used as tuple items, and
called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(f, g):
r = 0
for f_ in (f, g):
r = r + f_(r)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig),
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_), foo(a, b))
def test_in_ns_seq_call(self):
"""Functions are passed in as an argument and via namespace scoping
(mixed pathways), used as tuple items, and called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def mkfoo(b_):
def foo(f):
r = 0
for f_ in (f, b_):
r = r + f_(r)
return r
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig),
mk_njit_with_sig_func(sig), mk_wap_func(sig),
mk_ctypes_func(sig)][:-1]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(mkfoo(b_))(a_), mkfoo(b)(a))
def test_ns_call(self):
"""Function is passed in via namespace scoping and called.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
return a_(123)
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)())
def test_ns_out(self):
"""Function is passed in via namespace scoping and returned.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
return a_
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig),
mk_ctypes_func(sig)][:-1]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)())
def test_ns_call_out(self):
"""Function is passed in via namespace scoping, called, and then
returned.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
a_(123)
return a_
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig),
mk_ctypes_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)())
def test_in_overload(self):
"""Function is passed in as an argument and called with different
argument types.
"""
def a(i):
return i + 1
def foo(f):
r1 = f(123)
r2 = f(123.45)
return (r1, r2)
for decor in [njit_func]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(foo)(a_), foo(a))
def test_ns_overload(self):
"""Function is passed in via namespace scoping and called with
different argument types.
"""
def a(i):
return i + 1
def mkfoo(a_):
def foo():
r1 = a_(123)
r2 = a_(123.45)
return (r1, r2)
return foo
for decor in [njit_func]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)())
def test_in_choose(self):
"""Functions are passed in as arguments and called conditionally.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(a, b, choose_left):
if choose_left:
r = a(1)
else:
r = b(2)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_, True), foo(a, b, True))
self.assertEqual(jit_(foo)(a_, b_, False),
foo(a, b, False))
self.assertNotEqual(jit_(foo)(a_, b_, True),
foo(a, b, False))
def test_ns_choose(self):
"""Functions are passed in via namespace scoping and called
conditionally.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def mkfoo(a_, b_):
def foo(choose_left):
if choose_left:
r = a_(1)
else:
r = b_(2)
return r
return foo
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(mkfoo(a_, b_))(True),
mkfoo(a, b)(True))
self.assertEqual(jit_(mkfoo(a_, b_))(False),
mkfoo(a, b)(False))
self.assertNotEqual(jit_(mkfoo(a_, b_))(True),
mkfoo(a, b)(False))
def test_in_choose_out(self):
"""Functions are passed in as arguments and returned conditionally.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(a, b, choose_left):
if choose_left:
return a
else:
return b
sig = int64(int64)
for decor in [mk_cfunc_func(sig), njit_func,
mk_njit_with_sig_func(sig), mk_wap_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_, True).pyfunc,
foo(a, b, True))
self.assertEqual(jit_(foo)(a_, b_, False).pyfunc,
foo(a, b, False))
self.assertNotEqual(jit_(foo)(a_, b_, True).pyfunc,
foo(a, b, False))
def test_in_choose_func_value(self):
"""Functions are passed in as arguments, selected conditionally and
called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(a, b, choose_left):
if choose_left:
f = a
else:
f = b
return f(1)
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig), njit_func,
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)(a_, b_, True), foo(a, b, True))
self.assertEqual(jit_(foo)(a_, b_, False),
foo(a, b, False))
self.assertNotEqual(jit_(foo)(a_, b_, True),
foo(a, b, False))
def test_in_pick_func_call(self):
"""Functions are passed in as items of tuple argument, retrieved via
indexing, and called.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(funcs, i):
f = funcs[i]
r = f(123)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig),
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)((a_, b_), 0), foo((a, b), 0))
self.assertEqual(jit_(foo)((a_, b_), 1), foo((a, b), 1))
self.assertNotEqual(jit_(foo)((a_, b_), 0), foo((a, b), 1))
def test_in_iter_func_call(self):
"""Functions are passed in as items of tuple argument, retrieved via
indexing, and called within a variable for-loop.
"""
def a(i):
return i + 1
def b(i):
return i + 2
def foo(funcs, n):
r = 0
for i in range(n):
f = funcs[i]
r = r + f(r)
return r
sig = int64(int64)
for decor in [mk_cfunc_func(sig), mk_wap_func(sig),
mk_njit_with_sig_func(sig)]:
for jit_opts in [dict(nopython=True), dict(forceobj=True)]:
jit_ = jit(**jit_opts)
with self.subTest(decor=decor.__name__):
a_ = decor(a)
b_ = decor(b)
self.assertEqual(jit_(foo)((a_, b_), 2), foo((a, b), 2))
def test_experimental_feature_warning(self):
@jit(nopython=True)
def more(x):
return x + 1
@jit(nopython=True)
def less(x):
return x - 1
@jit(nopython=True)
def foo(sel, x):
fn = more if sel else less
return fn(x)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
res = foo(True, 10)
self.assertEqual(res, 11)
self.assertEqual(foo(False, 10), 9)
self.assertGreaterEqual(len(ws), 1)
pat = "First-class function type feature is experimental"
for w in ws:
if pat in str(w.message):
break
else:
self.fail("missing warning")
|
TestFunctionType
|
python
|
getsentry__sentry
|
src/sentry/identity/vsts_extension/provider.py
|
{
"start": 65,
"end": 545
}
|
class ____(VSTSIdentityProvider):
"""
Functions exactly the same as ``VSTSIdentityProvider``.
This class is necessary because of how Integration Pipelines look up
sibling/dependent classes using ``key``.
The IntegrationProvider for the VSTS Extension is slightly different from
the VSTS version, so it requires a new class. Hence, the Identity portion
also requires a new class; this one.
"""
key = "vsts-extension"
|
VstsExtensionIdentityProvider
|
python
|
pandas-dev__pandas
|
pandas/tests/plotting/test_boxplot_method.py
|
{
"start": 15372,
"end": 29894
}
|
class ____:
def test_boxplot_legacy1(self, hist_df):
grouped = hist_df.groupby(by="gender")
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
_check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
def test_boxplot_legacy1_return_type(self, hist_df):
grouped = hist_df.groupby(by="gender")
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
_check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_boxplot_legacy2(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=MultiIndex.from_tuples(tuples),
)
grouped = df.groupby(level=1)
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
_check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
@pytest.mark.slow
def test_boxplot_legacy2_return_type(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(
np.random.default_rng(2).random((10, 3)),
index=MultiIndex.from_tuples(tuples),
)
grouped = df.groupby(level=1)
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
_check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.default_rng(2).normal(166, 20, size=n))
height = Series(np.random.default_rng(2).normal(60, 10, size=n))
gender = np.random.default_rng(2).choice(["male", "female"], size=n)
df = DataFrame({"height": height, "weight": weight, "gender": gender})
gb = df.groupby("gender")
res = gb.plot()
assert len(mpl.pyplot.get_fignums()) == 2
assert len(res) == 2
plt.close("all")
res = gb.boxplot(return_type="axes")
assert len(mpl.pyplot.get_fignums()) == 1
assert len(res) == 2
def test_grouped_plot_fignums_excluded_col(self):
n = 10
weight = Series(np.random.default_rng(2).normal(166, 20, size=n))
height = Series(np.random.default_rng(2).normal(60, 10, size=n))
gender = np.random.default_rng(2).choice(["male", "female"], size=n)
df = DataFrame({"height": height, "weight": weight, "gender": gender})
# now works with GH 5610 as gender is excluded
df.groupby("gender").hist()
@pytest.mark.slow
def test_grouped_box_return_type(self, hist_df):
df = hist_df
# old style: return_type=None
result = df.boxplot(by="gender")
assert isinstance(result, np.ndarray)
_check_box_return_type(
result, None, expected_keys=["height", "weight", "category"]
)
@pytest.mark.slow
def test_grouped_box_return_type_groupby(self, hist_df):
df = hist_df
# now for groupby
result = df.groupby("gender").boxplot(return_type="dict")
_check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
@pytest.mark.slow
@pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
def test_grouped_box_return_type_arg(self, hist_df, return_type):
df = hist_df
returned = df.groupby("classroom").boxplot(return_type=return_type)
_check_box_return_type(returned, return_type, expected_keys=["A", "B", "C"])
returned = df.boxplot(by="classroom", return_type=return_type)
_check_box_return_type(
returned, return_type, expected_keys=["height", "weight", "category"]
)
@pytest.mark.slow
@pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
def test_grouped_box_return_type_arg_duplcate_cats(self, return_type):
columns2 = "X B C D A".split()
df2 = DataFrame(
np.random.default_rng(2).standard_normal((6, 5)), columns=columns2
)
categories2 = "A B".split()
df2["category"] = categories2 * 3
returned = df2.groupby("category").boxplot(return_type=return_type)
_check_box_return_type(returned, return_type, expected_keys=categories2)
returned = df2.boxplot(by="category", return_type=return_type)
_check_box_return_type(returned, return_type, expected_keys=columns2)
@pytest.mark.slow
def test_grouped_box_layout_too_small(self, hist_df):
df = hist_df
msg = "Layout of 1x1 must be larger than required size 2"
with pytest.raises(ValueError, match=msg):
df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
@pytest.mark.slow
def test_grouped_box_layout_needs_by(self, hist_df):
df = hist_df
msg = "The 'layout' keyword is not supported when 'by' is None"
with pytest.raises(ValueError, match=msg):
df.boxplot(
column=["height", "weight", "category"],
layout=(2, 1),
return_type="dict",
)
@pytest.mark.slow
def test_grouped_box_layout_positive_layout(self, hist_df):
df = hist_df
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
@pytest.mark.slow
@pytest.mark.parametrize(
"gb_key, axes_num, rows",
[["gender", 2, 1], ["category", 4, 2], ["classroom", 3, 2]],
)
def test_grouped_box_layout_positive_layout_axes(
self, hist_df, gb_key, axes_num, rows
):
df = hist_df
# _check_plot_works adds an ax so catch warning. see GH #13188 GH 6769
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
_check_plot_works(
df.groupby(gb_key).boxplot, column="height", return_type="dict"
)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=axes_num, layout=(rows, 2))
@pytest.mark.slow
@pytest.mark.parametrize(
"col, visible", [["height", False], ["weight", True], ["category", True]]
)
def test_grouped_box_layout_visible(self, hist_df, col, visible):
df = hist_df
# GH 5897
axes = df.boxplot(
column=["height", "weight", "category"], by="gender", return_type="axes"
)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2))
ax = axes[col]
_check_visible(ax.get_xticklabels(), visible=visible)
_check_visible([ax.xaxis.get_label()], visible=visible)
@pytest.mark.slow
def test_grouped_box_layout_shape(self, hist_df):
df = hist_df
df.groupby("classroom").boxplot(
column=["height", "weight", "category"], return_type="dict"
)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2))
@pytest.mark.slow
@pytest.mark.parametrize("cols", [2, -1])
def test_grouped_box_layout_works(self, hist_df, cols):
df = hist_df
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
_check_plot_works(
df.groupby("category").boxplot,
column="height",
layout=(3, cols),
return_type="dict",
)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(3, 2))
@pytest.mark.slow
@pytest.mark.parametrize("rows, res", [[4, 4], [-1, 3]])
def test_grouped_box_layout_axes_shape_rows(self, hist_df, rows, res):
df = hist_df
df.boxplot(
column=["height", "weight", "category"], by="gender", layout=(rows, 1)
)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(res, 1))
@pytest.mark.slow
@pytest.mark.parametrize("cols, res", [[4, 4], [-1, 3]])
def test_grouped_box_layout_axes_shape_cols_groupby(self, hist_df, cols, res):
df = hist_df
df.groupby("classroom").boxplot(
column=["height", "weight", "category"],
layout=(1, cols),
return_type="dict",
)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(1, res))
@pytest.mark.slow
def test_grouped_box_multiple_axes(self, hist_df):
# GH 6970, GH 7069
df = hist_df
# check warning to ignore sharex / sharey
# this check should be done in the first function which
# passes multiple axes to plot, hist or boxplot
# location should be changed if other test is added
# which has earlier alphabetical order
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
_, axes = mpl.pyplot.subplots(2, 2)
df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
_check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2))
@pytest.mark.slow
def test_grouped_box_multiple_axes_on_fig(self, hist_df):
# GH 6970, GH 7069
df = hist_df
fig, axes = mpl.pyplot.subplots(2, 3)
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
returned = df.boxplot(
column=["height", "weight", "category"],
by="gender",
return_type="axes",
ax=axes[0],
)
returned = np.array(list(returned.values))
_check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
# draw on second row
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
returned = df.groupby("classroom").boxplot(
column=["height", "weight", "category"], return_type="axes", ax=axes[1]
)
returned = np.array(list(returned.values))
_check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
@pytest.mark.slow
def test_grouped_box_multiple_axes_ax_error(self, hist_df):
# GH 6970, GH 7069
df = hist_df
msg = "The number of passed axes must be 3, the same as the output plot"
_, axes = mpl.pyplot.subplots(2, 3)
with pytest.raises(ValueError, match=msg):
# pass different number of axes from required
with tm.assert_produces_warning(UserWarning, match="sharex and sharey"):
axes = df.groupby("classroom").boxplot(ax=axes)
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
_check_ticks_props(
df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
)
@pytest.mark.parametrize(
"col, expected_xticklabel",
[
("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
(["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]),
(
["v", "v1"],
[
"(a, v)",
"(a, v1)",
"(b, v)",
"(b, v1)",
"(c, v)",
"(c, v1)",
"(d, v)",
"(d, v1)",
"(e, v)",
"(e, v1)",
],
),
(
None,
[
"(a, v)",
"(a, v1)",
"(b, v)",
"(b, v1)",
"(c, v)",
"(c, v1)",
"(d, v)",
"(d, v1)",
"(e, v)",
"(e, v1)",
],
),
],
)
def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel):
# GH 16748
df = DataFrame(
{
"cat": np.random.default_rng(2).choice(list("abcde"), 100),
"v": np.random.default_rng(2).random(100),
"v1": np.random.default_rng(2).random(100),
}
)
grouped = df.groupby("cat")
axes = _check_plot_works(
grouped.boxplot, subplots=False, column=col, return_type="axes"
)
result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
assert expected_xticklabel == result_xticklabel
def test_groupby_boxplot_object(self, hist_df):
# GH 43480
df = hist_df.astype("object")
grouped = df.groupby("gender")
msg = "boxplot method requires numerical columns, nothing to plot"
with pytest.raises(ValueError, match=msg):
_check_plot_works(grouped.boxplot, subplots=False)
def test_boxplot_multiindex_column(self):
# GH 16748
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 8)),
index=["A", "B", "C"],
columns=index,
)
col = [("bar", "one"), ("bar", "two")]
axes = _check_plot_works(df.boxplot, column=col, return_type="axes")
expected_xticklabel = ["(bar, one)", "(bar, two)"]
result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
assert expected_xticklabel == result_xticklabel
@pytest.mark.parametrize("group", ["X", ["X", "Y"]])
def test_boxplot_multi_groupby_groups(self, group):
# GH 14701
rows = 20
df = DataFrame(
np.random.default_rng(12).normal(size=(rows, 2)), columns=["Col1", "Col2"]
)
df["X"] = Series(np.repeat(["A", "B"], int(rows / 2)))
df["Y"] = Series(np.tile(["C", "D"], int(rows / 2)))
grouped = df.groupby(group)
_check_plot_works(df.boxplot, by=group, default_axes=True)
_check_plot_works(df.plot.box, by=group, default_axes=True)
_check_plot_works(grouped.boxplot, default_axes=True)
|
TestDataFrameGroupByPlots
|
python
|
django__django
|
tests/file_storage/test_base.py
|
{
"start": 390,
"end": 2799
}
|
class ____(SimpleTestCase):
invalid_file_names = [
os.path.join("path", "to", os.pardir, "test.file"),
os.path.join(os.path.sep, "path", "to", "test.file"),
]
error_msg = "Detected path traversal attempt in '%s'"
def test_validate_before_get_available_name(self):
s = CustomStorage()
# The initial name passed to `save` is not valid nor safe, fail early.
for name in self.invalid_file_names:
with (
self.subTest(name=name),
mock.patch.object(s, "get_available_name") as mock_get_available_name,
mock.patch.object(s, "_save") as mock_internal_save,
):
with self.assertRaisesMessage(
SuspiciousFileOperation, self.error_msg % name
):
s.save(name, content="irrelevant")
self.assertEqual(mock_get_available_name.mock_calls, [])
self.assertEqual(mock_internal_save.mock_calls, [])
def test_validate_after_get_available_name(self):
s = CustomStorage()
# The initial name passed to `save` is valid and safe, but the returned
# name from `get_available_name` is not.
for name in self.invalid_file_names:
with (
self.subTest(name=name),
mock.patch.object(s, "get_available_name", return_value=name),
mock.patch.object(s, "_save") as mock_internal_save,
):
with self.assertRaisesMessage(
SuspiciousFileOperation, self.error_msg % name
):
s.save("valid-file-name.txt", content="irrelevant")
self.assertEqual(mock_internal_save.mock_calls, [])
def test_validate_after_internal_save(self):
s = CustomStorage()
# The initial name passed to `save` is valid and safe, but the result
# from `_save` is not (this is achieved by monkeypatching _save).
for name in self.invalid_file_names:
with (
self.subTest(name=name),
mock.patch.object(s, "_save", return_value=name),
):
with self.assertRaisesMessage(
SuspiciousFileOperation, self.error_msg % name
):
s.save("valid-file-name.txt", content="irrelevant")
|
StorageValidateFileNameTests
|
python
|
pandas-dev__pandas
|
pandas/core/interchange/buffer.py
|
{
"start": 2130,
"end": 3428
}
|
class ____(Buffer):
"""
Data in the buffer is guaranteed to be contiguous in memory.
"""
def __init__(
self,
buffer: pa.Buffer,
*,
length: int,
) -> None:
"""
Handle pyarrow chunked arrays.
"""
self._buffer = buffer
self._length = length
@property
def bufsize(self) -> int:
"""
Buffer size in bytes.
"""
return self._buffer.size
@property
def ptr(self) -> int:
"""
Pointer to start of the buffer as an integer.
"""
return self._buffer.address
def __dlpack__(self) -> Any:
"""
Represent this structure as DLPack interface.
"""
raise NotImplementedError
def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
"""
Device type and device ID for where the data in the buffer resides.
"""
return (DlpackDeviceType.CPU, None)
def __repr__(self) -> str:
return (
"PandasBuffer[pyarrow]("
+ str(
{
"bufsize": self.bufsize,
"ptr": self.ptr,
"device": "CPU",
}
)
+ ")"
)
|
PandasBufferPyarrow
|
python
|
ansible__ansible
|
lib/ansible/plugins/action/normal.py
|
{
"start": 869,
"end": 1854
}
|
class ____(ActionBase):
_supports_check_mode = True
_supports_async = True
def run(self, tmp=None, task_vars=None):
# individual modules might disagree but as the generic the action plugin, pass at this point.
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
wrap_async = self._task.async_val and not self._connection.has_native_async
# do work!
result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
# hack to keep --verbose from showing all the setup module result
# moved from setup module as now we filter out all _ansible_ from result
if self._task.action in C._ACTION_SETUP:
result['_ansible_verbose_override'] = True
if not wrap_async:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
|
ActionModule
|
python
|
mlflow__mlflow
|
mlflow/llama_index/pyfunc_wrapper.py
|
{
"start": 2059,
"end": 3379
}
|
class ____:
def __init__(
self,
llama_model, # Engine or Workflow
model_config: dict[str, Any] | None = None,
):
self._llama_model = llama_model
self.model_config = model_config or {}
@property
def index(self):
return self._llama_model.index
def get_raw_model(self):
return self._llama_model
def _predict_single(self, *args, **kwargs) -> Any:
raise NotImplementedError
def _format_predict_input(self, data):
raise NotImplementedError
def _do_inference(self, input, params: dict[str, Any] | None) -> dict[str, Any]:
"""
Perform engine inference on a single engine input e.g. not an iterable of
engine inputs. The engine inputs must already be preprocessed/cleaned.
"""
if isinstance(input, dict):
return self._predict_single(**input, **(params or {}))
else:
return self._predict_single(input, **(params or {}))
def predict(self, data, params: dict[str, Any] | None = None) -> list[str] | str:
data = self._format_predict_input(data)
if isinstance(data, list):
return [self._do_inference(x, params) for x in data]
else:
return self._do_inference(data, params)
|
_LlamaIndexModelWrapperBase
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py
|
{
"start": 2759,
"end": 3139
}
|
class ____(GetQuery):
def __init__(self, arg):
GetQuery.__init__(self, arg)
def test_explicit_call_to_superclass():
user = GetUser(_test_source())
_test_sink(user.arg)
def evaluate_lazy(payload: Dict[str, str]):
return {key: value for key, value in payload.items()}
def test_simplified_evaluator():
_test_sink(evaluate_lazy(_test_source()))
|
GetUser
|
python
|
huggingface__transformers
|
tests/models/align/test_modeling_align.py
|
{
"start": 13275,
"end": 15523
}
|
class ____:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = AlignTextModelTester(parent, **text_kwargs)
self.vision_model_tester = AlignVisionModelTester(parent, **vision_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.is_training = is_training
def prepare_config_and_inputs(self):
test_config, input_ids, token_type_ids, input_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, pixel_values
def get_config(self):
return AlignConfig(
text_config=self.text_model_tester.get_config().to_dict(),
vision_config=self.vision_model_tester.get_config().to_dict(),
projection_dim=64,
)
def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values):
model = AlignModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, pixel_values, attention_mask, token_type_ids)
self.parent.assertEqual(
result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, input_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
"pixel_values": pixel_values,
"return_loss": True,
}
return config, inputs_dict
@require_torch
|
AlignModelTester
|
python
|
ansible__ansible
|
lib/ansible/plugins/filter/core.py
|
{
"start": 23773,
"end": 27283
}
|
class ____(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': from_json,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
'from_yaml_all': from_yaml_all,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'expandvars': partial(unicode_wrap, os.path.expandvars),
'path_join': path_join,
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
'commonpath': commonpath,
'normpath': partial(unicode_wrap, os.path.normpath),
# file glob
'fileglob': fileglob,
# types
'bool': to_bool,
'to_datetime': to_datetime,
# date formatting
'strftime': strftime,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# comment-style decoration
'comment': comment,
# debug
'type_debug': type_debug,
# Data structures
'combine': combine,
'extract': extract,
'flatten': flatten,
'dict2items': dict_to_list_of_dict_key_value_elements,
'items2dict': list_of_dict_key_value_elements_to_dict,
'subelements': subelements,
'split': partial(unicode_wrap, str.split),
# FDI038 - replace this with a standard type compat shim
'groupby': _cleansed_groupby,
# Jinja builtins that need special arg handling
'd': ansible_default, # replaces the implementation instead of wrapping it
'default': ansible_default, # replaces the implementation instead of wrapping it
'map': wrapped_map,
'select': wrapped_select,
'selectattr': wrapped_selectattr,
'reject': wrapped_reject,
'rejectattr': wrapped_rejectattr,
}
|
FilterModule
|
python
|
tiangolo__fastapi
|
docs_src/cookie_param_models/tutorial001_py310.py
|
{
"start": 86,
"end": 303
}
|
class ____(BaseModel):
session_id: str
fatebook_tracker: str | None = None
googall_tracker: str | None = None
@app.get("/items/")
async def read_items(cookies: Cookies = Cookie()):
return cookies
|
Cookies
|
python
|
ApeWorX__ape
|
src/ape_compile/config.py
|
{
"start": 568,
"end": 2816
}
|
class ____(PluginConfig):
"""
Configure general compiler settings.
"""
exclude: set[Union[str, Pattern]] = set()
"""
Source exclusion globs or regex patterns across all file types.
To use regex, start your values with ``r"`` and they'll be turned
into regex pattern objects.
**NOTE**: ``ape.utils.misc.SOURCE_EXCLUDE_PATTERNS`` are automatically
included in this set.
"""
include_dependencies: bool = False
"""
Set to ``True`` to compile dependencies during ``ape compile``.
Generally, dependencies are not compiled during ``ape compile``
This is because dependencies may not compile in Ape on their own,
but you can still reference them in your project's contracts' imports.
Some projects may be more dependency-based and wish to have the
contract types always compiled during ``ape compile``, and these projects
should configure ``include_dependencies`` to be ``True``.
"""
output_extra: list[OutputExtras] = []
"""
Extra selections to output. Outputs to ``.build/{key.lower()}``.
"""
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_COMPILE_")
@field_validator("exclude", mode="before")
@classmethod
def validate_exclude(cls, value):
given_values = []
# Convert regex to Patterns.
for given in value or []:
if (given.startswith('r"') and given.endswith('"')) or (
given.startswith("r'") and given.endswith("'")
):
value_clean = given[2:-1]
pattern = re.compile(value_clean)
given_values.append(pattern)
else:
given_values.append(given)
# Include defaults.
return {*given_values, *SOURCE_EXCLUDE_PATTERNS}
@field_serializer("exclude", when_used="json")
def serialize_exclude(self, exclude, info):
"""
Exclude is put back with the weird r-prefix so we can
go to-and-from.
"""
result: list[str] = []
for excl in exclude:
if isinstance(excl, Pattern):
result.append(f'r"{excl.pattern}"')
else:
result.append(excl)
return result
|
Config
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_axis31.py
|
{
"start": 315,
"end": 1398
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis31.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [90616960, 90618496]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
chart.set_y_axis({"position_axis": "on_tick"})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
src/sentry/grouping/fingerprinting/rules.py
|
{
"start": 1067,
"end": 1341
}
|
class ____(TypedDict):
text: str
# Each matcher is a list of [<name of event attribute to match>, <value to match>]
matchers: list[list[str]]
fingerprint: list[str]
attributes: FingerprintRuleAttributes
is_builtin: NotRequired[bool]
|
FingerprintRuleJSON
|
python
|
realpython__materials
|
wordcount/tests/realpython/models.py
|
{
"start": 3863,
"end": 6514
}
|
class ____:
tests: tuple[Test, ...]
@classmethod
def from_session(cls, session: Session) -> Self:
tests = []
for item in session.items:
if STASH_REPORT_KEY in item.stash:
report = item.stash[STASH_REPORT_KEY]
if "Failed: Timeout >" in report.longreprtext:
status = TestStatus.TIMED_OUT
else:
status = TestStatus(report.outcome)
if hasattr(report, "exception"):
exception = report.exception
else:
exception = None
tests.append(Test(item, status, exception))
return cls(tuple(tests))
@cached_property
def num_passed(self) -> int:
return sum(
1 for test in self.tests if test.status is TestStatus.PASSED
)
@cached_property
def num_tests(self) -> int:
return len(self.tests)
@cached_property
def num_tasks(self) -> int:
return len({test.task_number for test in self.tests})
@property
def tests_by_task(self) -> Iterator[tuple[int, Iterator[Test]]]:
# Assume tests have been already sorted by the task number
return groupby(self.tests, attrgetter("task_number"))
@cached_property
def status(self) -> TestStatus:
statuses = {test.status for test in self.tests}
if TestStatus.TIMED_OUT in statuses:
return TestStatus.TIMED_OUT
elif TestStatus.FAILED in statuses:
return TestStatus.FAILED
elif set(statuses) == {TestStatus.PASSED} or {
TestStatus.PASSED,
TestStatus.SKIPPED,
}:
return TestStatus.PASSED
else:
raise ValueError("None of the tests were executed")
def task(self, task_number: int) -> Task:
for test in self.tests:
if test.task_number == task_number:
if test.function and hasattr(test.function, "task"):
return test.function.task
raise ValueError(f"invalid task number {task_number}")
def task_status(self, task_number: int) -> TestStatus:
statuses = {
test.status
for test in self.tests
if test.task_number == task_number
}
if statuses:
if TestStatus.TIMED_OUT in statuses:
return TestStatus.TIMED_OUT
elif TestStatus.FAILED in statuses:
return TestStatus.FAILED
else:
return TestStatus.PASSED
else:
raise ValueError(f"invalid task number {task_number}")
|
TestRun
|
python
|
milvus-io__pymilvus
|
pymilvus/client/interceptor.py
|
{
"start": 3215,
"end": 3985
}
|
class ____(ClientCallDetailsTuple, grpc.ClientCallDetails):
pass
def header_adder_interceptor(headers: List, values: List):
def intercept_call(
client_call_details: Any,
request_iterator: Any,
):
metadata = []
if client_call_details.metadata is not None:
metadata = list(client_call_details.metadata)
for item in zip(headers, values):
metadata.append(item)
client_call_details = _ClientCallDetails(
client_call_details.method,
client_call_details.timeout,
metadata,
client_call_details.credentials,
)
return client_call_details, request_iterator, None
return _GenericClientInterceptor(intercept_call)
|
_ClientCallDetails
|
python
|
jazzband__django-polymorphic
|
example/pexp/models.py
|
{
"start": 1517,
"end": 1594
}
|
class ____(TestModelA):
field2 = models.CharField(max_length=10)
|
TestModelB
|
python
|
ethereum__web3.py
|
tests/core/manager/test_middleware_can_be_stateful.py
|
{
"start": 157,
"end": 878
}
|
class ____(Web3Middleware):
state = []
def wrap_make_request(self, make_request):
def middleware(method, params):
self.state.append((method, params))
return {"jsonrpc": "2.0", "id": 1, "result": self.state}
return middleware
stateful_middleware = StatefulMiddleware
def test_middleware_holds_state_across_requests():
provider = BaseProvider()
manager = RequestManager(None, provider, middleware=[stateful_middleware])
state_a = manager.request_blocking("test_statefulness", [])
assert len(state_a) == 1
state_b = manager.request_blocking("test_statefulness", [])
assert id(state_a) == id(state_b)
assert len(state_b) == 2
|
StatefulMiddleware
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_storages.py
|
{
"start": 1239,
"end": 3986
}
|
class ____(TestCase):
def setUp(self):
self.default_storage = storage.default_storage
storage.default_storage = GzipStorage()
storage.brotli_storage = BrotliStorage()
def tearDown(self):
storage.default_storage = self.default_storage
def test_gzip_storage(self):
storage.default_storage.save("test.txt", ContentFile("yeah yeah"))
self.assertTrue(
os.path.exists(os.path.join(settings.COMPRESS_ROOT, "test.txt"))
)
self.assertTrue(
os.path.exists(os.path.join(settings.COMPRESS_ROOT, "test.txt.gz"))
)
def test_brotli_storage(self):
payload = ",".join([str(i) for i in range(1000)]).encode()
chunk_size = 1024
storage.brotli_storage.save("test.txt", ContentFile(payload))
self.assertTrue(
os.path.exists(os.path.join(settings.COMPRESS_ROOT, "test.txt"))
)
self.assertTrue(
os.path.exists(os.path.join(settings.COMPRESS_ROOT, "test.txt.br"))
)
decompressed_data = b""
br_decompressor = brotli.Decompressor()
with open(os.path.join(settings.COMPRESS_ROOT, "test.txt.br"), "rb") as f:
for data in iter(lambda: f.read(chunk_size), b""):
decompressed_data += br_decompressor.process(data)
self.assertEqual(payload, decompressed_data)
def test_css_tag_with_storage(self):
template = """{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ STATIC_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid white;}</style>
<link rel="stylesheet" href="{{ STATIC_URL }}css/two.css" type="text/css">
{% endcompress %}
"""
context = {"STATIC_URL": settings.COMPRESS_URL}
out = css_tag("/static/CACHE/css/output.e701f86c6430.css")
self.assertEqual(out, render(template, context))
def test_duplicate_save_overwrites_same_file(self):
filename1 = self.default_storage.save("test.txt", ContentFile("yeah yeah"))
filename2 = self.default_storage.save("test.txt", ContentFile("yeah yeah"))
self.assertEqual(filename1, filename2)
self.assertNotIn("_", filename2)
def test_offline_manifest_storage(self):
storage.default_offline_manifest_storage.save(
"test.txt", ContentFile("yeah yeah")
)
self.assertTrue(
os.path.exists(os.path.join(settings.COMPRESS_ROOT, "CACHE", "test.txt"))
)
# Check that the file is stored at the same default location as before
# the new manifest storage.
self.assertTrue(self.default_storage.exists(os.path.join("CACHE", "test.txt")))
|
StorageTestCase
|
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-box/llama_index/tools/box/search/base.py
|
{
"start": 502,
"end": 3694
}
|
class ____:
"""
Represents options for searching Box resources.
This class provides a way to specify various criteria for filtering search results
when using the `BoxSearchToolSpec` class. You can define parameters like search
scope, file extensions, date ranges (created/updated at), size range, owner IDs,
and more to refine your search.
Attributes:
scope (Optional[SearchForContentScope]): The scope of the search (e.g., all
content, trashed content).
file_extensions (Optional[List[str]]): A list of file extensions to filter by.
created_at_range (Optional[List[str]]): A list representing a date range for
file creation time (format: YYYY-MM-DD).
updated_at_range (Optional[List[str]]): A list representing a date range for
file update time (format: YYYY-MM-DD).
size_range (Optional[List[int]]): A list representing a range for file size (in bytes).
owner_user_ids (Optional[List[str]]): A list of user IDs to filter by owner.
recent_updater_user_ids (Optional[List[str]]): A list of user IDs to filter by
recent updater.
ancestor_folder_ids (Optional[List[str]]): A list of folder IDs to search within.
content_types (Optional[List[SearchForContentContentTypes]]): A list of content
types to filter by.
limit (Optional[int]): The maximum number of search results to return.
offset (Optional[int]): The offset to start results from (for pagination).
"""
scope: Optional[SearchForContentScope] = None
file_extensions: Optional[List[str]] = None
created_at_range: Optional[List[str]] = None
updated_at_range: Optional[List[str]] = None
size_range: Optional[List[int]] = None
owner_user_ids: Optional[List[str]] = None
recent_updater_user_ids: Optional[List[str]] = None
ancestor_folder_ids: Optional[List[str]] = None
content_types: Optional[List[SearchForContentContentTypes]] = None
limit: Optional[int] = None
offset: Optional[int] = None
def __init__(
self,
scope: Optional[SearchForContentScope] = None,
file_extensions: Optional[List[str]] = None,
created_at_range: Optional[List[str]] = None,
updated_at_range: Optional[List[str]] = None,
size_range: Optional[List[int]] = None,
owner_user_ids: Optional[List[str]] = None,
recent_updater_user_ids: Optional[List[str]] = None,
ancestor_folder_ids: Optional[List[str]] = None,
content_types: Optional[List[SearchForContentContentTypes]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> None:
self.scope = scope
self.file_extensions = file_extensions
self.created_at_range = created_at_range
self.updated_at_range = updated_at_range
self.size_range = size_range
self.owner_user_ids = owner_user_ids
self.recent_updater_user_ids = recent_updater_user_ids
self.ancestor_folder_ids = ancestor_folder_ids
self.content_types = content_types
self.limit = limit
self.offset = offset
|
BoxSearchOptions
|
python
|
kamyu104__LeetCode-Solutions
|
Python/alien-dictionary.py
|
{
"start": 94,
"end": 1834
}
|
class ____(object):
    """Alien Dictionary: recover a character ordering from sorted words.

    Uses Kahn's algorithm (BFS topological sort): build precedence edges
    from each adjacent word pair, then repeatedly emit characters with no
    remaining incoming edges.  Returns "" when the input is contradictory
    (a cycle, or a word listed after its own longer prefix).
    """

    def alienOrder(self, words):
        """
        :type words: List[str]
        :rtype: str  -- one valid ordering of all characters, or "" if none
        """
        result, in_degree, out_degree = [], {}, {}
        zero_in_degree_queue = collections.deque()
        # Collect every character that appears, so isolated characters
        # (no edges at all) are still part of the output.
        nodes = set()
        for word in words:
            for c in word:
                nodes.add(c)
        # range() instead of Python-2-only xrange(): fixes NameError on Python 3.
        for i in range(1, len(words)):
            # A word listed after a strictly longer word that it prefixes
            # (e.g. ["abc", "ab"]) is impossible in any ordering.
            if (len(words[i - 1]) > len(words[i]) and
                    words[i - 1][:len(words[i])] == words[i]):
                return ""
            self.findEdges(words[i - 1], words[i], in_degree, out_degree)
        # Seed the queue with characters that have no predecessors.
        for node in nodes:
            if node not in in_degree:
                zero_in_degree_queue.append(node)
        while zero_in_degree_queue:
            precedence = zero_in_degree_queue.popleft()
            result.append(precedence)
            if precedence in out_degree:
                for c in out_degree[precedence]:
                    in_degree[c].discard(precedence)
                    if not in_degree[c]:
                        zero_in_degree_queue.append(c)
                del out_degree[precedence]
        # Leftover outgoing edges mean a cycle: no valid ordering exists.
        if out_degree:
            return ""
        return "".join(result)

    def findEdges(self, word1, word2, in_degree, out_degree):
        """Add the single precedence edge implied by an adjacent word pair.

        Only the first differing character matters: word1[i] must come
        before word2[i].  Records the edge in both adjacency maps.
        """
        str_len = min(len(word1), len(word2))
        for i in range(str_len):
            if word1[i] != word2[i]:
                if word2[i] not in in_degree:
                    in_degree[word2[i]] = set()
                if word1[i] not in out_degree:
                    out_degree[word1[i]] = set()
                in_degree[word2[i]].add(word1[i])
                out_degree[word1[i]].add(word2[i])
                break
# DFS solution.
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/634. Find the Derangement of An Array/634.py
|
{
"start": 0,
"end": 282
}
|
class ____:
    """Count derangements (permutations with no fixed point) of n items."""

    def findDerangement(self, n: int) -> int:
        """Return D(n) modulo 1_000_000_007.

        Uses the recurrence D(i) = (i - 1) * (D(i - 1) + D(i - 2)) with
        D(0) = 1, D(1) = 0, computed iteratively.  The original memoized
        recursion recursed n levels deep and raised RecursionError for
        n beyond Python's default recursion limit; the iterative form is
        O(n) time, O(1) space, and works for any n.
        """
        MOD = 1_000_000_007
        if n == 0:
            return 1
        # prev2, prev1 track D(i - 2) and D(i - 1) as i advances.
        prev2, prev1 = 1, 0
        for i in range(2, n + 1):
            prev2, prev1 = prev1, (i - 1) * (prev2 + prev1) % MOD
        return prev1
|
Solution
|
python
|
encode__django-rest-framework
|
tests/test_views.py
|
{
"start": 3889,
"end": 4321
}
|
class ____(TestCase):
def setUp(self):
    # Bind the view-under-test once per test; OverriddenSettingsView
    # presumably carries custom REST framework settings — see class name.
    self.view = OverriddenSettingsView.as_view()
def test_get_exception_handler(self):
    """The view's overridden exception handler produces the custom payload."""
    response = self.view(
        factory.get('/', content_type='application/json')
    )
    assert response.status_code == 400
    assert response.data == {'error': 'SyntaxError'}
@unittest.skipUnless(DJANGO_VERSION >= (5, 1), 'Only for Django 5.1+')
|
TestCustomSettings
|
python
|
SmileyChris__easy-thumbnails
|
easy_thumbnails/tests/test_templatetags.py
|
{
"start": 10687,
"end": 11793
}
|
class ____(ThumbnailerBase):
    """Tests for the ``thumbnailer`` template filter.

    Each positive test renders a template that uses the filter and checks
    the output equals the media URL of the thumbnail produced through the
    'small' alias.  The shared render/verify/compare sequence is factored
    into a private helper to remove the triplicated code.
    """

    def _assert_renders_small_alias_url(self, src):
        # Render *src* and assert it emits the 'small' alias thumbnail URL.
        output = self.render_template(src)
        expected = self.verify_thumbnail(
            (20, 20), settings.THUMBNAIL_ALIASES['']['small'])
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, expected_url)

    def test_get(self):
        # Filter applied directly to a filename from the template context.
        self._assert_renders_small_alias_url(
            '{% with t=filename|thumbnailer %}'
            '{{ t.small.url }}{% endwith %}'
        )

    def test_relative_name(self):
        # Filter applied to a storage with an explicit relative file name.
        self._assert_renders_small_alias_url(
            '{% with t=storage|thumbnailer:filename %}'
            '{{ t.small.url }}{% endwith %}'
        )

    def test_invalid(self):
        # A missing/invalid source renders to an empty string, not an error.
        src = (
            '{% with t=invalid_filename|thumbnailer %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        self.assertEqual(output, '')
|
ThumbnailerFilterTest
|
python
|
getsentry__sentry
|
tests/sentry/snuba/test_outcomes.py
|
{
"start": 733,
"end": 5796
}
|
class ____(TestCase):
    """Tests for outcomes query construction via ``_make_query``.

    Covers rejection of malformed query strings (missing/invalid fields,
    categories, reasons, outcomes, group-bys) and verification that valid
    query strings produce the expected Snuba conditions and aggregates.
    """

    def test_query_must_have_category(self) -> None:
        # A quantity sum without any category is rejected as an invalid query.
        with pytest.raises(InvalidQuery):
            _make_query("statsPeriod=4d&interval=1d&field=sum(quantity)")

    def test_invalid_field(self) -> None:
        # Only known aggregate fields are accepted.
        with pytest.raises(InvalidField):
            _make_query("statsPeriod=4d&interval=1d&field=sum(badstuff)")

    def test_empty_query(self) -> None:
        # An empty query string has no field at all.
        with pytest.raises(InvalidField):
            _make_query("")

    def test_invalid_groupby(self) -> None:
        # Unknown groupBy values ("no") are rejected.
        with pytest.raises(InvalidField):
            _make_query(
                "statsPeriod=4d&interval=1d&field=sum(quantity)&groupBy=category&groupBy=no"
            )

    def test_invalid_category(self) -> None:
        with pytest.raises(InvalidField):
            _make_query("statsPeriod=4d&category=zzz&interval=1d&groupBy=category&groupBy=no")

    def test_invalid_reason(self) -> None:
        with pytest.raises(InvalidField):
            _make_query("statsPeriod=4d&reason=zzz&interval=1d&groupBy=category&groupBy=no")

    def test_invalid_outcome(self) -> None:
        with pytest.raises(InvalidField):
            _make_query("statsPeriod=4d&outcome=zzz&interval=1d&groupBy=category&groupBy=no")

    def test_no_field(self) -> None:
        # field= is mandatory.
        with pytest.raises(InvalidField):
            _make_query("statsPeriod=4d&interval=1d&groupBy=category&groupBy=no")

    def test_no_combined_attachment(self) -> None:
        # attachment cannot be combined with other categories in one query.
        with pytest.raises(InvalidQuery):
            _make_query(
                "statsPeriod=4d&interval=1d&category=error&category=attachment&field=sum(quantity)"
            )

    def test_correct_category_mapping(self) -> None:
        # category=error expands to the DEFAULT/ERROR/SECURITY data categories.
        query = _make_query(
            "statsPeriod=4d&interval=1d&category=error&field=sum(quantity)",
            {"organization_id": 1},
        )
        assert (
            Condition(
                Column("category"),
                Op.IN,
                [DataCategory.DEFAULT, DataCategory.ERROR, DataCategory.SECURITY],
            )
        ) in query.conditions

    def test_correct_reason_mapping(self) -> None:
        # The public reason name "spike_protection" maps to the stored
        # reason value "smart_rate_limit".
        query = _make_query(
            "statsPeriod=4d&interval=1d&groupBy=category&reason=spike_protection&field=sum(quantity)",
            {"organization_id": 1},
        )
        assert Condition(Column("reason"), Op.IN, ["smart_rate_limit"]) in query.conditions

    def test_correct_outcome_mapping(self) -> None:
        query = _make_query(
            "statsPeriod=4d&interval=1d&groupBy=category&outcome=accepted&field=sum(quantity)",
            {"organization_id": 1},
        )
        assert Condition(Column("outcome"), Op.IN, [Outcome.ACCEPTED]) in query.conditions

    def test_correct_times_seen_aggregate(self) -> None:
        # NOTE(review): the third positional argument appears to select a
        # different dataset/mode for times_seen — confirm _make_query's
        # signature.  Sub-hour rollups count rows; daily rollups sum the
        # pre-aggregated times_seen column.
        query = _make_query(
            "statsPeriod=6h&interval=10m&groupBy=category&field=sum(times_seen)",
            {"organization_id": 1},
            True,
        )
        assert Function("count", [], "times_seen") in query.select_params
        query = _make_query(
            "statsPeriod=6h&interval=1d&groupBy=category&field=sum(times_seen)",
            {"organization_id": 1},
            True,
        )
        assert Function("sum", [Column("times_seen")], "times_seen") in query.select_params

    def test_filter_keys(self) -> None:
        # organization_id becomes an equality condition; project_id an IN list.
        query = _make_query(
            "statsPeriod=6h&interval=10m&groupBy=category&field=sum(times_seen)",
            {"organization_id": 1},
            True,
        )
        assert Condition(Column("org_id"), Op.EQ, 1) in query.conditions
        query = _make_query(
            "statsPeriod=6h&interval=1d&groupBy=category&field=sum(times_seen)",
            {"organization_id": 1, "project_id": [1, 2, 3, 4, 5]},
            True,
        )
        assert Condition(Column("org_id"), Op.EQ, 1) in query.conditions
        assert Condition(Column("project_id"), Op.IN, [1, 2, 3, 4, 5]) in query.conditions

    def test_key_id_filter(self) -> None:
        # A numeric key_id query parameter becomes an IN condition.
        query = _make_query(
            "statsPeriod=4d&interval=1d&groupBy=category&key_id=12345&field=sum(quantity)",
            {"organization_id": 1},
        )
        assert Condition(Column("key_id"), Op.IN, [12345]) in query.conditions

    def test_key_id_filter_invalid(self) -> None:
        # Non-numeric key_id values are rejected.
        with pytest.raises(InvalidQuery):
            _make_query(
                "statsPeriod=4d&interval=1d&groupBy=category&key_id=INVALID&field=sum(quantity)",
                {"organization_id": 1},
            )

    def test_start_and_end_no_interval(self) -> None:
        # Explicit start/end without an interval fall back to a 3600s rollup.
        start = timezone.now()
        end = start + timedelta(days=1)
        query = _make_query(
            urllib.parse.urlencode(
                {
                    "groupBy": "category",
                    "field": "sum(quantity)",
                    "start": start.isoformat(),
                    "end": end.isoformat(),
                }
            ),
            {"organization_id": 1},
        )
        assert query.start
        assert query.end
        assert query.rollup == 3600
|
OutcomesQueryDefinitionTests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.