language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | automl__auto-sklearn | autosklearn/experimental/askl2.py | {
"start": 711,
"end": 2356
} | class ____:
def __init__(self, portfolio):
self.portfolio = portfolio
def __call__(
self,
scenario_dict,
seed,
ta,
ta_kwargs,
metalearning_configurations,
n_jobs,
dask_client,
multi_objective_algorithm,
multi_objective_kwargs,
):
from smac.facade.smac_ac_facade import SMAC4AC
from smac.intensification.simple_intensifier import SimpleIntensifier
from smac.runhistory.runhistory2epm import RunHistory2EPM4LogCost
from smac.scenario.scenario import Scenario
scenario = Scenario(scenario_dict)
initial_configurations = []
for member in self.portfolio.values():
try:
hp_names = scenario.cs.get_hyperparameter_names()
_member = {key: member[key] for key in member if key in hp_names}
initial_configurations.append(
Configuration(configuration_space=scenario.cs, values=_member)
)
except ValueError:
pass
rh2EPM = RunHistory2EPM4LogCost
return SMAC4AC(
scenario=scenario,
rng=seed,
runhistory2epm=rh2EPM,
tae_runner=ta,
tae_runner_kwargs=ta_kwargs,
initial_configurations=initial_configurations,
intensifier=SimpleIntensifier,
run_id=seed,
n_jobs=n_jobs,
dask_client=dask_client,
multi_objective_algorithm=multi_objective_algorithm,
multi_objective_kwargs=multi_objective_kwargs,
)
| SmacObjectCallback |
python | joke2k__faker | faker/providers/sbn/__init__.py | {
"start": 168,
"end": 1978
} | class ____(BaseProvider):
"""Generates fake SBNs. These are the precursor to the ISBN and are
largely similar to ISBN-10.
See https://www.isbn-international.org/content/what-isbn for the
format of ISBNs. SBNs have no EAN prefix or Registration Group.
"""
def _body(self) -> List[str]:
"""Generate the information required to create an SBN"""
reg_pub_len: int = SBN.MAX_LENGTH - 1
# Generate a registrant/publication combination
reg_pub: str = self.numerify("#" * reg_pub_len)
# Use rules to separate the registrant from the publication
rules: List[RegistrantRule] = RULES
registrant, publication = self._registrant_publication(reg_pub, rules)
return [registrant, publication]
@staticmethod
def _registrant_publication(reg_pub: str, rules: List[RegistrantRule]) -> Tuple[str, str]:
"""Separate the registration from the publication in a given
string.
:param reg_pub: A string of digits representing a registration
and publication.
:param rules: A list of RegistrantRules which designate where
to separate the values in the string.
:returns: A (registrant, publication) tuple of strings.
"""
for rule in rules:
if rule.min <= reg_pub[:-1] <= rule.max:
reg_len = rule.registrant_length
break
else:
raise Exception("Registrant/Publication not found in registrant rule list.")
registrant, publication = reg_pub[:reg_len], reg_pub[reg_len:]
return registrant, publication
def sbn9(self, separator: str = "-") -> str:
registrant, publication = self._body()
sbn = SBN9(registrant, publication)
return sbn.format(separator)
| Provider |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox38.py | {
"start": 315,
"end": 1645
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox38.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48060288, 48300032]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
worksheet.insert_image(
"E25",
self.image_dir + "red.png",
{"url": "https://github.com/jmcnamara/foo"},
)
worksheet.insert_textbox(
"G25", "This is some text", {"url": "https://github.com/jmcnamara/bar"}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-separate-numbers.py | {
"start": 33,
"end": 1728
} | class ____(object):
def numberOfCombinations(self, num):
"""
:type num: str
:rtype: int
"""
MOD = 10**9+7
def find_longest_common_prefix(num):
lcp = [[0]*(len(num)+1) for _ in xrange(len(num)+1)] # lcp[i][j]: longest length of the common prefix which starts at num[i], num[j]
for i in reversed(xrange(len(lcp)-1)):
for j in reversed(xrange(len(lcp[0])-1)):
if num[i] == num[j]:
lcp[i][j] = lcp[i+1][j+1]+1
return lcp
def is_less_or_equal_to_with_same_length(num, lcp, i, j, l):
return lcp[i][j] >= l or num[i+lcp[i][j]] < num[j+lcp[i][j]]
lcp = find_longest_common_prefix(num)
dp = [[0]*len(num) for _ in xrange(len(num))] # dp[i][l]: the count of numbers ending at num[i], where the length of the last number is l+1
dp[0][0] = int(num[0] != '0')
for i in xrange(1, len(num)):
dp[i][i] = dp[i-1][i-1]
if num[i] == '0':
continue
accu = 0
for l in xrange(len(num)-i+1):
ni = i+l-1
dp[ni][l-1] = accu # accumulated count where the length of the second to last number ending at num[i-1] is shorter than the length of the last number ending at num[i+l-1]
if i-l < 0:
continue
if num[i-l] != '0' and is_less_or_equal_to_with_same_length(num, lcp, i-l, i, l):
dp[ni][l-1] = (dp[ni][l-1] + dp[i-1][l-1]) % MOD
accu = (accu + dp[i-1][l-1]) % MOD
return reduce(lambda total, x: (total+x)%MOD, dp[-1], 0)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/cli_shared_test.py | {
"start": 1248,
"end": 2983
} | class ____(test_util.TensorFlowTestCase):
def testNoneSizeWorks(self):
self.assertEqual(str(None), cli_shared.bytes_to_readable_str(None))
def testSizesBelowOneKiloByteWorks(self):
self.assertEqual("0", cli_shared.bytes_to_readable_str(0))
self.assertEqual("500", cli_shared.bytes_to_readable_str(500))
self.assertEqual("1023", cli_shared.bytes_to_readable_str(1023))
def testSizesBetweenOneKiloByteandOneMegaByteWorks(self):
self.assertEqual("1.00k", cli_shared.bytes_to_readable_str(1024))
self.assertEqual("2.40k", cli_shared.bytes_to_readable_str(int(1024 * 2.4)))
self.assertEqual("1023.00k", cli_shared.bytes_to_readable_str(1024 * 1023))
def testSizesBetweenOneMegaByteandOneGigaByteWorks(self):
self.assertEqual("1.00M", cli_shared.bytes_to_readable_str(1024**2))
self.assertEqual("2.40M",
cli_shared.bytes_to_readable_str(int(1024**2 * 2.4)))
self.assertEqual("1023.00M",
cli_shared.bytes_to_readable_str(1024**2 * 1023))
def testSizeAboveOneGigaByteWorks(self):
self.assertEqual("1.00G", cli_shared.bytes_to_readable_str(1024**3))
self.assertEqual("2000.00G",
cli_shared.bytes_to_readable_str(1024**3 * 2000))
def testReadableStrIncludesBAtTheEndOnRequest(self):
self.assertEqual("0B", cli_shared.bytes_to_readable_str(0, include_b=True))
self.assertEqual(
"1.00kB", cli_shared.bytes_to_readable_str(
1024, include_b=True))
self.assertEqual(
"1.00MB", cli_shared.bytes_to_readable_str(
1024**2, include_b=True))
self.assertEqual(
"1.00GB", cli_shared.bytes_to_readable_str(
1024**3, include_b=True))
| BytesToReadableStrTest |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 2840,
"end": 3115
} | class ____(AutoEnum):
"""Enumeration of work pool statuses."""
READY = AutoEnum.auto()
NOT_READY = AutoEnum.auto()
PAUSED = AutoEnum.auto()
@property
def display_name(self) -> str:
return self.name.replace("_", " ").capitalize()
| WorkPoolStatus |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/alert_types.py | {
"start": 102,
"end": 907
} | class ____(Enum):
"""Possible comparison operators for an insights alert type, used to
determine when to trigger an alert based on the value of the metric.
"""
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
def compare(self, computed_value: float, target_value: float) -> bool:
if self == InsightsAlertComparisonOperator.LESS_THAN:
return computed_value < target_value
return computed_value > target_value
def as_text(self) -> str:
"""Used in alert text to describe the comparison operator,
e.g. usage is less than the limit or usage is greater than the limit.
"""
if self == InsightsAlertComparisonOperator.LESS_THAN:
return "less than"
return "greater than"
| InsightsAlertComparisonOperator |
python | getsentry__sentry | src/sentry/seer/breakpoints.py | {
"start": 1453,
"end": 1608
} | class ____(TypedDict):
data: "list[SnubaTSEntry]"
request_start: int
request_end: int
data_start: int
data_end: int
| BreakpointTransaction |
python | weaviate__weaviate-python-client | weaviate/groups/async_.py | {
"start": 312,
"end": 383
} | class ____(_GroupsOIDCExecutor[ConnectionAsync]):
pass
| _GroupsOIDCAsync |
python | jina-ai__jina | jina/enums.py | {
"start": 7286,
"end": 7422
} | class ____(str, Enum):
"""Subprotocol supported with Websocket Gateway"""
JSON = 'json'
BYTES = 'bytes'
| WebsocketSubProtocols |
python | ray-project__ray | release/benchmark-worker-startup/benchmark_worker_startup.py | {
"start": 3525,
"end": 7434
} | class ____:
"""
Actor which tests will report metrics to.
"""
def __init__(self, expected_measurements_per_test: int):
self.measurements = defaultdict(list)
self.expected_measurements_per_test = expected_measurements_per_test
def submit(self, test_name: str, latency: float):
print(f"got latency {latency} s for test {test_name}")
self.measurements[test_name].append(latency)
results = self.create_results_dict_from_measurements(
self.measurements, self.expected_measurements_per_test
)
safe_write_to_results_json(results)
assert (
len(self.measurements[test_name]) <= self.expected_measurements_per_test
), (
f"Expected {self.measurements[test_name]} to not have more elements than "
f"{self.expected_measurements_per_test}"
)
@staticmethod
def create_results_dict_from_measurements(
all_measurements, expected_measurements_per_test
):
results = {}
perf_metrics = []
for test_name, measurements in all_measurements.items():
test_summary = {
"measurements": measurements,
}
if len(measurements) == expected_measurements_per_test:
median = statistics.median(measurements)
test_summary["p50"] = median
perf_metrics.append(
{
"perf_metric_name": f"p50.{test_name}",
"perf_metric_value": median,
"perf_metric_type": "LATENCY",
}
)
results[test_name] = test_summary
results["perf_metrics"] = perf_metrics
return results
def print_disk_config():
print("Getting disk sizes via df -h")
subprocess.check_call("df -h", shell=True)
def generate_test_matrix(
num_cpus_in_cluster: int,
num_gpus_in_cluster: int,
num_tasks_or_actors_per_run: int,
num_measurements_per_test: int,
):
num_repeated_jobs_or_runs = num_measurements_per_test
total_num_tasks_or_actors = num_tasks_or_actors_per_run * num_repeated_jobs_or_runs
num_jobs_per_type = {
"cold_start": num_repeated_jobs_or_runs,
"warm_start": 1,
}
imports_to_try = ["torch", "none"]
tests = set()
for with_tasks in [True, False]:
for with_gpu in [True, False]:
# Do not run without runtime env. TODO(cade) Infra team added cgroups to
# default runtime env, need to find some way around that if we want
# "pure" (non-runtime-env) measurements.
for with_runtime_env in [True]:
for import_to_try in imports_to_try:
for num_jobs in num_jobs_per_type.values():
num_tasks_or_actors_per_job = (
total_num_tasks_or_actors // num_jobs
)
num_runs_per_job = (
num_tasks_or_actors_per_job // num_tasks_or_actors_per_run
)
test = TestConfiguration(
num_jobs=num_jobs,
num_runs_per_job=num_runs_per_job,
num_tasks_or_actors_per_run=num_tasks_or_actors_per_run,
with_tasks=with_tasks,
with_gpu=with_gpu,
with_runtime_env=with_runtime_env,
import_to_try=import_to_try,
num_cpus_in_cluster=num_cpus_in_cluster,
num_gpus_in_cluster=num_gpus_in_cluster,
num_nodes_in_cluster=1,
)
tests.add(test)
return tests
@dataclass(eq=True, frozen=True)
| MetricsActor |
python | pytorch__pytorch | torch/__init__.py | {
"start": 70306,
"end": 70529
} | class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.uint8
| ByteStorage |
python | getsentry__sentry | src/sentry/sentry_apps/installations.py | {
"start": 4179,
"end": 7225
} | class ____:
organization_id: int
slug: str
notify: bool = True
def run(self, *, user: User | RpcUser, request: HttpRequest | None) -> SentryAppInstallation:
with SentryAppInteractionEvent(
operation_type=SentryAppInteractionType.MANAGEMENT,
event_type=SentryAppEventType.INSTALLATION_CREATE,
).capture() as lifecycle:
with transaction.atomic(router.db_for_write(ApiGrant)):
api_grant = self._create_api_grant()
install = self._create_install(api_grant=api_grant)
lifecycle.add_extra("installation_id", install.id)
self.audit(request=request)
self._create_service_hooks(install=install)
install.is_new = True
if self.notify:
installation_webhook.delay(install.id, user.id)
self.record_analytics(user=user)
return install
def _create_install(self, api_grant: ApiGrant) -> SentryAppInstallation:
status = SentryAppInstallationStatus.PENDING
if not self.sentry_app.verify_install:
status = SentryAppInstallationStatus.INSTALLED
return SentryAppInstallation.objects.create(
organization_id=self.organization_id,
sentry_app_id=self.sentry_app.id,
api_grant_id=api_grant.id,
status=status,
)
def _create_api_grant(self) -> ApiGrant:
return ApiGrant.objects.create(
user_id=self.sentry_app.proxy_user_id, application_id=self.sentry_app.application_id
)
def _create_service_hooks(self, install: SentryAppInstallation) -> None:
# only make the service hook if there is a webhook url
if self.sentry_app.webhook_url:
hook_service.create_service_hook(
application_id=self.sentry_app.application_id,
actor_id=install.id,
installation_id=install.id,
organization_id=self.organization_id,
events=self.sentry_app.events,
url=self.sentry_app.webhook_url,
)
def audit(self, request: HttpRequest | None) -> None:
from sentry.utils.audit import create_audit_entry
if request:
create_audit_entry(
request=request,
organization_id=self.organization_id,
target_object=self.organization_id,
event=audit_log.get_event_id("SENTRY_APP_INSTALL"),
data={"sentry_app": self.sentry_app.name},
)
def record_analytics(self, user: User | RpcUser) -> None:
analytics.record(
SentryAppInstalledEvent(
user_id=user.id,
organization_id=self.organization_id,
sentry_app=self.slug,
)
)
metrics.incr("sentry_apps.installation.success")
@cached_property
def sentry_app(self) -> SentryApp:
return SentryApp.objects.get(slug=self.slug)
| SentryAppInstallationCreator |
python | doocs__leetcode | solution/1000-1099/1036.Escape a Large Maze/Solution.py | {
"start": 0,
"end": 810
} | class ____:
def isEscapePossible(
self, blocked: List[List[int]], source: List[int], target: List[int]
) -> bool:
def dfs(source: List[int], target: List[int], vis: set) -> bool:
vis.add(tuple(source))
if len(vis) > m:
return True
for a, b in pairwise(dirs):
x, y = source[0] + a, source[1] + b
if 0 <= x < n and 0 <= y < n and (x, y) not in s and (x, y) not in vis:
if [x, y] == target or dfs([x, y], target, vis):
return True
return False
s = {(x, y) for x, y in blocked}
dirs = (-1, 0, 1, 0, -1)
n = 10**6
m = len(blocked) ** 2 // 2
return dfs(source, target, set()) and dfs(target, source, set())
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/asset_backfill.py | {
"start": 28178,
"end": 95415
} | class ____(NamedTuple):
run_requests: Sequence[RunRequest]
backfill_data: AssetBackfillData
reserved_run_ids: Sequence[str]
def _get_requested_asset_graph_subset_from_run_requests(
run_requests: Sequence[RunRequest],
asset_graph_view: AssetGraphView,
) -> AssetGraphSubset:
asset_graph = asset_graph_view.asset_graph
requested_subset = AssetGraphSubset.create_empty_subset()
for run_request in run_requests:
# Run request targets a range of partitions
range_start = run_request.tags.get(ASSET_PARTITION_RANGE_START_TAG)
range_end = run_request.tags.get(ASSET_PARTITION_RANGE_END_TAG)
if range_start and range_end:
# When a run request targets a range of partitions, each asset is expected to
# have the same partitions def
selected_assets = cast("Sequence[AssetKey]", run_request.asset_selection)
check.invariant(len(selected_assets) > 0)
partition_range = PartitionKeyRange(range_start, range_end)
entity_subsets = [
asset_graph_view.get_entity_subset_in_range(asset_key, partition_range)
for asset_key in selected_assets
]
requested_subset = requested_subset | AssetGraphSubset.from_entity_subsets(
entity_subsets
)
else:
requested_subset = requested_subset | AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(asset_key, run_request.partition_key)
for asset_key in cast("Sequence[AssetKey]", run_request.asset_selection)
},
asset_graph,
# don't need expensive checks for whether the partition keys are still in the subset
# when just determining what was previously requested in this backfill
validate_time_range=False,
)
return requested_subset
def _write_updated_backfill_data(
instance: DagsterInstance,
backfill_id: str,
updated_backfill_data: AssetBackfillData,
asset_graph: RemoteAssetGraph,
updated_run_requests: Sequence[RunRequest],
updated_reserved_run_ids: Sequence[str],
):
backfill = check.not_none(instance.get_backfill(backfill_id))
updated_backfill = backfill.with_asset_backfill_data(
updated_backfill_data,
dynamic_partitions_store=instance,
asset_graph=asset_graph,
).with_submitting_run_requests(
updated_run_requests,
updated_reserved_run_ids,
)
instance.update_backfill(updated_backfill)
return updated_backfill
async def _submit_runs_and_update_backfill_in_chunks(
asset_graph_view: AssetGraphView,
workspace_process_context: IWorkspaceProcessContext,
backfill_id: str,
asset_backfill_iteration_result: AssetBackfillIterationResult,
logger: logging.Logger,
run_tags: Mapping[str, str],
) -> None:
from dagster._core.execution.backfill import BulkActionStatus
from dagster._daemon.utils import DaemonErrorCapture
asset_graph = cast("RemoteWorkspaceAssetGraph", asset_graph_view.asset_graph)
instance = asset_graph_view.instance
run_requests = asset_backfill_iteration_result.run_requests
# Iterate through runs to request, submitting runs in chunks.
# In between each chunk, check that the backfill is still marked as 'requested',
# to ensure that no more runs are requested if the backfill is marked as canceled/canceling.
updated_backfill_data = asset_backfill_iteration_result.backfill_data
num_submitted = 0
reserved_run_ids = asset_backfill_iteration_result.reserved_run_ids
run_request_execution_data_cache = {}
chunk_size = get_asset_backfill_run_chunk_size()
for run_request_idx, run_request in enumerate(run_requests):
run_id = reserved_run_ids[run_request_idx] if reserved_run_ids else None
try:
# create a new request context for each run in case the code location server
# is swapped out in the middle of the submission process
workspace = workspace_process_context.create_request_context()
await submit_asset_run(
run_id,
run_request._replace(
tags={
**run_request.tags,
**run_tags,
BACKFILL_ID_TAG: backfill_id,
}
),
run_request_idx,
instance,
workspace_process_context,
workspace,
run_request_execution_data_cache,
{},
logger,
)
except Exception:
DaemonErrorCapture.process_exception(
sys.exc_info(),
logger=logger,
log_message="Error while submitting run - updating the backfill data before re-raising",
)
# Write the runs that we submitted before hitting an error
_write_updated_backfill_data(
instance,
backfill_id,
updated_backfill_data,
asset_graph,
run_requests[num_submitted:],
asset_backfill_iteration_result.reserved_run_ids[num_submitted:],
)
raise
num_submitted += 1
updated_backfill_data: AssetBackfillData = (
updated_backfill_data.with_run_requests_submitted(
[run_request],
asset_graph_view,
)
)
# After each chunk or on the final request, write the updated backfill data
# and check to make sure we weren't interrupted
if (num_submitted % chunk_size == 0) or num_submitted == len(run_requests):
backfill = _write_updated_backfill_data(
instance,
backfill_id,
updated_backfill_data,
asset_graph,
run_requests[num_submitted:],
asset_backfill_iteration_result.reserved_run_ids[num_submitted:],
)
if backfill.status != BulkActionStatus.REQUESTED:
break
return
def _check_target_partitions_subset_is_valid(
asset_key: AssetKey,
asset_graph: BaseAssetGraph,
target_partitions_subset: Optional[PartitionsSubset],
instance_queryer: CachingInstanceQueryer,
) -> None:
"""Checks for any partitions definition changes since backfill launch that should mark
the backfill as failed.
"""
if not asset_graph.has(asset_key):
raise DagsterDefinitionChangedDeserializationError(
f"Asset {asset_key} existed at storage-time, but no longer does"
)
partitions_def = asset_graph.get(asset_key).partitions_def
if target_partitions_subset: # Asset was partitioned at storage time
if partitions_def is None:
raise DagsterDefinitionChangedDeserializationError(
f"Asset {asset_key} had a PartitionsDefinition at storage-time, but no longer does"
)
# Check that all target partitions still exist. If so, the backfill can continue.
existent_partitions_subset = (
partitions_def.subset_with_all_partitions() & target_partitions_subset
)
removed_partitions_subset = target_partitions_subset - existent_partitions_subset
if len(removed_partitions_subset) > 0:
raise DagsterDefinitionChangedDeserializationError(
f"Targeted partitions for asset {asset_key} have been removed since this backfill was stored. "
f"The following partitions were removed: {removed_partitions_subset.get_partition_keys()}"
)
else: # Asset unpartitioned at storage time
if partitions_def is not None:
raise DagsterDefinitionChangedDeserializationError(
f"Asset {asset_key} was not partitioned at storage-time, but is now"
)
def _check_asset_backfill_data_validity(
asset_backfill_data: AssetBackfillData,
asset_graph: BaseAssetGraph,
instance_queryer: CachingInstanceQueryer,
) -> None:
for asset_key in asset_backfill_data.target_subset.asset_keys:
_check_target_partitions_subset_is_valid(
asset_key,
asset_graph,
asset_backfill_data.target_subset.get_partitions_subset(asset_key)
if asset_key in asset_backfill_data.target_subset.partitions_subsets_by_asset_key
else None,
instance_queryer,
)
def _check_validity_of_asset_backfill_data_and_should_process_backfill(
workspace_context: BaseWorkspaceRequestContext,
backfill_id: str,
asset_backfill_data: AssetBackfillData,
asset_graph: RemoteWorkspaceAssetGraph,
instance_queryer: CachingInstanceQueryer,
logger: logging.Logger,
) -> bool:
"""Validates if the asset backfill data is valid. If it is not an error will be raised unless
DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR is set, in which case it returns False so that
the backfillis skipped this iteration. Otherwise returns True.
"""
unloadable_locations = _get_unloadable_location_names(workspace_context, logger)
try:
_check_asset_backfill_data_validity(asset_backfill_data, asset_graph, instance_queryer)
except DagsterDefinitionChangedDeserializationError as ex:
unloadable_locations_error = (
"This could be because it's inside a code location that's failing to load:"
f" {unloadable_locations}"
if unloadable_locations
else ""
)
if (
os.environ.get("DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR")
and unloadable_locations
):
logger.warning(
f"Backfill {backfill_id} was unable to continue due to a missing asset or"
" partition in the asset graph. The backfill will resume once it is available"
f" again.\n{ex}. {unloadable_locations_error}"
)
return False
else:
raise DagsterAssetBackfillDataLoadError(f"{ex}. {unloadable_locations_error}")
return True
def backfill_is_complete(
backfill_id: str,
backfill_data: AssetBackfillData,
instance: DagsterInstance,
logger: logging.Logger,
):
"""A backfill is complete when:
1. all asset partitions in the target subset have a materialization state (successful, failed, downstream of a failed partition).
2. there are no in progress runs for the backfill.
3. there are no failed runs that will result in an automatic retry, but have not yet been retried.
Condition 1 ensures that for each asset partition we have attempted to materialize it or have determined we
cannot materialize it because of a failed dependency. Condition 2 ensures that no retries of failed runs are
in progress. Condition 3 guards against a race condition where a failed run could be automatically retried
but it was not added into the queue in time to be caught by condition 2.
Since the AssetBackfillData object stores materialization states per asset partition, we want to ensure the
daemon continues to update the backfill data until all runs have finished in order to display the
final partition statuses in the UI.
"""
# Condition 1 - if any asset partitions in the target subset do not have a materialization state, the backfill
# is not complete
if not backfill_data.all_targeted_partitions_have_materialization_status():
logger.info(
"Not all targeted asset partitions have a materialization status. Backfill is still in progress."
)
return False
# Condition 2 - if there are in progress runs for the backfill, the backfill is not complete
if (
len(
instance.get_run_ids(
filters=RunsFilter(
statuses=NOT_FINISHED_STATUSES,
tags={BACKFILL_ID_TAG: backfill_id},
),
limit=1,
)
)
> 0
):
logger.info("Backfill has in progress runs. Backfill is still in progress.")
return False
# Condition 3 - if there are runs that will be retried, but have not yet been retried, the backfill is not complete
runs_waiting_to_retry = [
run.run_id
for run in instance.get_runs(
filters=RunsFilter(
tags={BACKFILL_ID_TAG: backfill_id, WILL_RETRY_TAG: "true"},
statuses=[DagsterRunStatus.FAILURE],
)
)
if run.is_complete_and_waiting_to_retry
]
if len(runs_waiting_to_retry) > 0:
num_runs_to_log = 20
formatted_runs = "\n".join(runs_waiting_to_retry[:num_runs_to_log])
if len(runs_waiting_to_retry) > num_runs_to_log:
formatted_runs += f"\n... {len(runs_waiting_to_retry) - num_runs_to_log} more"
logger.info(
f"The following runs for the backfill will be retried, but retries have not been launched. Backfill is still in progress:\n{formatted_runs}"
)
return False
return True
async def execute_asset_backfill_iteration(
backfill: "PartitionBackfill",
logger: logging.Logger,
workspace_process_context: IWorkspaceProcessContext,
instance: DagsterInstance,
) -> None:
"""Runs an iteration of the backfill, including submitting runs and updating the backfill object
in the DB.
This is a generator so that we can return control to the daemon and let it heartbeat during
expensive operations.
"""
from dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill
logger.info(f"Evaluating asset backfill {backfill.backfill_id}")
workspace_context = workspace_process_context.create_request_context()
asset_graph = workspace_context.asset_graph
if not backfill.is_asset_backfill:
check.failed("Backfill must be an asset backfill")
backfill_start_datetime = datetime_from_timestamp(backfill.backfill_timestamp)
asset_graph_view = AssetGraphView(
temporal_context=TemporalContext(
effective_dt=backfill_start_datetime,
last_event_id=None,
),
instance=instance,
asset_graph=asset_graph,
)
instance_queryer = asset_graph_view.get_inner_queryer_for_back_compat()
previous_asset_backfill_data = backfill.get_asset_backfill_data(asset_graph)
if backfill.status == BulkActionStatus.REQUESTED:
should_process_backfill = (
_check_validity_of_asset_backfill_data_and_should_process_backfill(
workspace_context,
backfill.backfill_id,
previous_asset_backfill_data,
asset_graph,
instance_queryer,
logger,
)
)
if not should_process_backfill:
return
logger.info(
f"Assets targeted by backfill {backfill.backfill_id} are valid. Continuing execution with current status: {backfill.status}."
)
if backfill.submitting_run_requests:
# interrupted in the middle of executing run requests - re-construct the in-progress iteration result
logger.warn(
f"Resuming previous backfill iteration and re-submitting {len(backfill.submitting_run_requests)} runs."
)
result = AssetBackfillIterationResult(
run_requests=backfill.submitting_run_requests,
backfill_data=previous_asset_backfill_data,
reserved_run_ids=backfill.reserved_run_ids,
)
updated_backfill = backfill
else:
# Generate a new set of run requests to launch, and update the materialized and failed
# subsets
result = execute_asset_backfill_iteration_inner(
backfill_id=backfill.backfill_id,
asset_backfill_data=previous_asset_backfill_data,
asset_graph_view=asset_graph_view,
backfill_start_timestamp=backfill.backfill_timestamp,
logger=logger,
run_config=backfill.run_config,
)
# Write the updated asset backfill data with in progress run requests before we launch anything, for idempotency
# Make sure we didn't get canceled in the interim
updated_backfill: PartitionBackfill = check.not_none(
instance.get_backfill(backfill.backfill_id)
)
if updated_backfill.status != BulkActionStatus.REQUESTED:
logger.info("Backfill was canceled mid-iteration, returning")
return
updated_backfill = (
updated_backfill.with_asset_backfill_data(
result.backfill_data,
dynamic_partitions_store=instance,
asset_graph=asset_graph,
)
.with_submitting_run_requests(result.run_requests, result.reserved_run_ids)
.with_failure_count(0)
)
instance.update_backfill(updated_backfill)
if result.run_requests:
await _submit_runs_and_update_backfill_in_chunks(
asset_graph_view,
workspace_process_context,
updated_backfill.backfill_id,
result,
logger,
run_tags=updated_backfill.tags,
)
updated_backfill = cast(
"PartitionBackfill", instance.get_backfill(updated_backfill.backfill_id)
)
if updated_backfill.status == BulkActionStatus.REQUESTED:
check.invariant(
not updated_backfill.submitting_run_requests,
"All run requests should have been submitted",
)
updated_backfill_data = updated_backfill.get_asset_backfill_data(asset_graph)
if backfill_is_complete(
backfill_id=backfill.backfill_id,
backfill_data=updated_backfill_data,
instance=instance,
logger=logger,
):
if (
updated_backfill_data.failed_and_downstream_subset.num_partitions_and_non_partitioned_assets
> 0
):
updated_backfill = updated_backfill.with_status(BulkActionStatus.COMPLETED_FAILED)
else:
updated_backfill: PartitionBackfill = updated_backfill.with_status(
BulkActionStatus.COMPLETED_SUCCESS
)
updated_backfill = updated_backfill.with_end_timestamp(get_current_timestamp())
instance.update_backfill(updated_backfill)
new_materialized_partitions = (
updated_backfill_data.materialized_subset
- previous_asset_backfill_data.materialized_subset
)
new_failed_partitions = (
updated_backfill_data.failed_and_downstream_subset
- previous_asset_backfill_data.failed_and_downstream_subset
)
updated_backfill_in_progress = updated_backfill_data.requested_subset - (
updated_backfill_data.materialized_subset
| updated_backfill_data.failed_and_downstream_subset
)
previous_backfill_in_progress = (
previous_asset_backfill_data.requested_subset
- previous_asset_backfill_data.materialized_subset
)
new_requested_partitions = updated_backfill_in_progress - previous_backfill_in_progress
logger.info(
f"Asset backfill {updated_backfill.backfill_id} completed iteration with status {updated_backfill.status}."
)
logger.info(
"Backfill iteration summary:\n"
f"**Assets materialized since last iteration:**\n{_asset_graph_subset_to_str(new_materialized_partitions, asset_graph) if new_materialized_partitions.num_partitions_and_non_partitioned_assets > 0 else 'None'}\n"
f"**Assets failed since last iteration and their downstream assets:**\n{_asset_graph_subset_to_str(new_failed_partitions, asset_graph) if new_failed_partitions.num_partitions_and_non_partitioned_assets > 0 else 'None'}\n"
f"**Assets requested by this iteration:**\n{_asset_graph_subset_to_str(new_requested_partitions, asset_graph) if new_requested_partitions.num_partitions_and_non_partitioned_assets > 0 else 'None'}\n"
)
logger.info(
"Overall backfill status:\n"
f"**Materialized assets:**\n{_asset_graph_subset_to_str(updated_backfill_data.materialized_subset, asset_graph) if updated_backfill_data.materialized_subset.num_partitions_and_non_partitioned_assets > 0 else 'None'}\n"
f"**Failed assets and their downstream assets:**\n{_asset_graph_subset_to_str(updated_backfill_data.failed_and_downstream_subset, asset_graph) if updated_backfill_data.failed_and_downstream_subset.num_partitions_and_non_partitioned_assets > 0 else 'None'}\n"
f"**Assets requested or in progress:**\n{_asset_graph_subset_to_str(updated_backfill_in_progress, asset_graph) if updated_backfill_in_progress.num_partitions_and_non_partitioned_assets > 0 else 'None'}\n"
)
logger.debug(
f"Updated asset backfill data for {updated_backfill.backfill_id}: {updated_backfill_data}"
)
elif (
backfill.status == BulkActionStatus.CANCELING or backfill.status == BulkActionStatus.FAILING
):
from dagster._core.execution.backfill import cancel_backfill_runs_and_cancellation_complete
status_once_runs_are_complete = (
BulkActionStatus.CANCELED
if backfill.status == BulkActionStatus.CANCELING
else BulkActionStatus.FAILED
)
all_runs_canceled = cancel_backfill_runs_and_cancellation_complete(
instance=instance,
backfill_id=backfill.backfill_id,
logger=logger,
)
try:
# Update the asset backfill data to contain the newly materialized/failed partitions.
updated_asset_backfill_data = get_canceling_asset_backfill_iteration_data(
backfill.backfill_id,
previous_asset_backfill_data,
asset_graph_view,
backfill.backfill_timestamp,
)
# Refetch, in case the backfill was forcibly marked as canceled/failed in the meantime
backfill = cast("PartitionBackfill", instance.get_backfill(backfill.backfill_id))
updated_backfill: PartitionBackfill = backfill.with_asset_backfill_data(
updated_asset_backfill_data,
dynamic_partitions_store=instance,
asset_graph=asset_graph,
)
# The asset backfill is successfully canceled when all requested runs have finished (success,
# failure, or cancellation). Since the AssetBackfillData object stores materialization states
# per asset partition, the daemon continues to update the backfill data until all runs have
# finished in order to display the final partition statuses in the UI.
except Exception as e:
logger.warning(
f"Error updating asset backfill data for backfill {backfill.backfill_id} when canceling runs. "
"If all runs for this backfill have finished, the backfill will be marked as completed without updating "
f"the individual asset partition statuses. Error: {e}"
)
# Refetch, in case the backfill was forcibly marked as canceled/failed in the meantime
updated_backfill = cast(
"PartitionBackfill", instance.get_backfill(backfill.backfill_id)
)
asset_backfill_data_after_iteration = backfill.get_asset_backfill_data(asset_graph)
all_partitions_marked_completed = asset_backfill_data_after_iteration.all_requested_partitions_marked_as_materialized_or_failed()
if all_partitions_marked_completed:
updated_backfill = updated_backfill.with_status(
status_once_runs_are_complete
).with_end_timestamp(get_current_timestamp())
if all_runs_canceled and not all_partitions_marked_completed:
logger.warning(
"All runs have completed, but not all requested partitions have been marked as materialized or failed. "
"This may indicate that some runs succeeded without materializing their expected partitions."
)
updated_backfill = updated_backfill.with_status(
status_once_runs_are_complete
).with_end_timestamp(get_current_timestamp())
instance.update_backfill(updated_backfill)
logger.info(
f"Asset backfill {backfill.backfill_id} completed cancellation iteration with status {updated_backfill.status}."
)
logger.debug(
f"Updated asset backfill data after cancellation iteration: {asset_backfill_data_after_iteration}"
)
elif backfill.status == BulkActionStatus.CANCELED:
# The backfill was forcibly canceled, skip iteration
pass
else:
check.failed(f"Unexpected backfill status: {backfill.status}")
def get_canceling_asset_backfill_iteration_data(
    backfill_id: str,
    asset_backfill_data: AssetBackfillData,
    asset_graph_view: AssetGraphView,
    backfill_start_timestamp: float,
) -> AssetBackfillData:
    """For asset backfills in the "canceling" state, fetch the asset backfill data with the updated
    materialized and failed subsets.

    Args:
        backfill_id: The id of the canceling backfill.
        asset_backfill_data: The bookkeeping state from the previous iteration.
        asset_graph_view: View of the asset graph used to resolve subsets.
        backfill_start_timestamp: Timestamp at which the backfill was originally launched.

    Returns:
        A new AssetBackfillData whose materialized and failed-and-downstream subsets
        reflect materializations and failures observed since the last iteration; all
        other fields are carried over unchanged.
    """
    asset_graph = cast("RemoteWorkspaceAssetGraph", asset_graph_view.asset_graph)
    instance_queryer = asset_graph_view.get_inner_queryer_for_back_compat()
    # (Removed a dead `updated_materialized_subset = None` assignment that was
    # immediately overwritten by the call below.)
    updated_materialized_subset = get_asset_backfill_iteration_materialized_subset(
        backfill_id, asset_backfill_data, asset_graph, instance_queryer
    )

    failed_subset = _get_failed_asset_graph_subset(
        asset_graph_view,
        backfill_id,
        materialized_subset=updated_materialized_subset,
    )

    # we fetch the failed_subset to get any new assets that have failed and add that to the set of
    # assets we already know failed and their downstreams. However we need to remove any assets in
    # updated_materialized_subset to account for the case where a run retry successfully
    # materialized a previously failed asset.
    updated_failed_subset = (
        asset_backfill_data.failed_and_downstream_subset | failed_subset
    ) - updated_materialized_subset

    return AssetBackfillData(
        target_subset=asset_backfill_data.target_subset,
        latest_storage_id=asset_backfill_data.latest_storage_id,
        requested_runs_for_target_roots=asset_backfill_data.requested_runs_for_target_roots,
        materialized_subset=updated_materialized_subset,
        failed_and_downstream_subset=updated_failed_subset,
        requested_subset=asset_backfill_data.requested_subset,
        backfill_start_time=TimestampWithTimezone(backfill_start_timestamp, "UTC"),
    )
def get_asset_backfill_iteration_materialized_subset(
    backfill_id: str,
    asset_backfill_data: AssetBackfillData,
    asset_graph: RemoteWorkspaceAssetGraph,
    instance_queryer: CachingInstanceQueryer,
) -> AssetGraphSubset:
    """Return the partitions that have been materialized by this backfill so far.

    For each targeted asset key, pages through materialization events recorded after
    ``asset_backfill_data.latest_storage_id``, keeps only those whose run carries this
    backfill's id in its BACKFILL_ID_TAG, and unions the resulting asset partitions
    into ``asset_backfill_data.materialized_subset``.

    Args:
        backfill_id: The id of the backfill whose materializations are being collected.
        asset_backfill_data: Bookkeeping state from the previous iteration; supplies the
            storage-id cursor and the previously-known materialized subset.
        asset_graph: The workspace asset graph used to build subsets.
        instance_queryer: Accessor for the instance's event log and run storage.

    Returns:
        The previous materialized subset unioned with all newly observed
        backfill-attributed materializations.
    """
    recently_materialized_asset_partitions = AssetGraphSubset()
    for asset_key in asset_backfill_data.target_subset.asset_keys:
        # Page through materialization events for this asset in fixed-size chunks.
        cursor = None
        has_more = True
        while has_more:
            materializations_result = instance_queryer.instance.fetch_materializations(
                AssetRecordsFilter(
                    asset_key=asset_key,
                    after_storage_id=asset_backfill_data.latest_storage_id,
                ),
                cursor=cursor,
                limit=MATERIALIZATION_CHUNK_SIZE,
            )
            cursor = materializations_result.cursor
            has_more = materializations_result.has_more
            run_ids = [record.run_id for record in materializations_result.records if record.run_id]
            if run_ids:
                run_records = instance_queryer.instance.get_run_records(
                    filters=RunsFilter(run_ids=run_ids),
                )
                # Only count materializations from runs launched by *this* backfill.
                run_ids_in_backfill = {
                    run_record.dagster_run.run_id
                    for run_record in run_records
                    if run_record.dagster_run.tags.get(BACKFILL_ID_TAG) == backfill_id
                }
                materialization_records_in_backfill = [
                    record
                    for record in materializations_result.records
                    if record.run_id in run_ids_in_backfill
                ]
                recently_materialized_asset_partitions |= AssetGraphSubset.from_asset_partition_set(
                    {
                        AssetKeyPartitionKey(asset_key, record.partition_key)
                        for record in materialization_records_in_backfill
                    },
                    asset_graph,
                )
    updated_materialized_subset = (
        asset_backfill_data.materialized_subset | recently_materialized_asset_partitions
    )
    return updated_materialized_subset
def _get_subset_in_target_subset(
    asset_graph_view: AssetGraphView,
    candidate_asset_graph_subset: AssetGraphSubset,
    target_subset: AssetGraphSubset,
) -> "AssetGraphSubset":
    """Intersect the single candidate entity subset with the backfill's target subset.

    The candidate is expected to contain exactly one entity subset (the BFS that calls
    this runs with include_execution_set=False).
    """
    entity_subsets = [
        subset for subset in asset_graph_view.iterate_asset_subsets(candidate_asset_graph_subset)
    ]
    assert len(entity_subsets) == 1, (
        "Since include_execution_set=False, there should be exactly one candidate entity subset"
    )
    candidate = entity_subsets[0]
    target_for_key = asset_graph_view.get_entity_subset_from_asset_graph_subset(
        target_subset, candidate.key
    )
    intersection: EntitySubset[AssetKey] = candidate.compute_intersection(target_for_key)
    return AssetGraphSubset.from_entity_subsets([intersection])
def _get_failed_and_downstream_asset_graph_subset(
    backfill_id: str,
    asset_backfill_data: AssetBackfillData,
    asset_graph_view: AssetGraphView,
    materialized_subset: AssetGraphSubset,
    failed_asset_graph_subset: AssetGraphSubset,
) -> AssetGraphSubset:
    """Return the failed partitions plus all of their targeted downstream partitions.

    Performs a BFS over the asset graph starting from ``failed_asset_graph_subset``,
    passing every candidate subset that lies within the backfill's target subset, so the
    result is the failed subset and its transitive descendants, clipped to the target.

    NOTE(review): ``backfill_id`` and ``materialized_subset`` are not used in this body;
    they appear to be kept for interface symmetry with the sibling helpers — confirm
    before removing.
    """
    failed_and_downstream_subset = bfs_filter_asset_graph_view(
        asset_graph_view,
        # Every reachable candidate passes as long as it is inside the target subset;
        # no exclusion reasons are recorded for this traversal.
        lambda candidate_asset_graph_subset, _: (
            AssetGraphViewBfsFilterConditionResult(
                passed_asset_graph_subset=_get_subset_in_target_subset(
                    asset_graph_view,
                    candidate_asset_graph_subset,
                    asset_backfill_data.target_subset,
                ),
                excluded_asset_graph_subsets_and_reasons=[],
            )
        ),
        initial_asset_graph_subset=failed_asset_graph_subset,
        include_full_execution_set=False,
    )[0]  # bfs_filter_asset_graph_view returns (passed, excluded); only passed is needed

    return failed_and_downstream_subset
def _get_next_latest_storage_id(instance_queryer: CachingInstanceQueryer) -> int:
# Events are not always guaranteed to be written to the event log in monotonically increasing
# order, so add a configurable offset to ensure that any stragglers will still be included in
# the next iteration.
# This may result in the same event being considered within multiple iterations, but
# idempotence checks later ensure that the materialization isn't incorrectly
# double-counted.
cursor_offset = int(os.getenv("ASSET_BACKFILL_CURSOR_OFFSET", "0"))
next_latest_storage_id = (
instance_queryer.instance.event_log_storage.get_maximum_record_id() or 0
)
return max(next_latest_storage_id - cursor_offset, 0)
def _partition_subset_str(
    partition_subset: PartitionsSubset,
    partitions_def: PartitionsDefinition,
):
    """Render a human-readable, comma-separated summary of a partitions subset.

    Time-window subsets are summarized as partition-count + key-range per included
    window; any other subset is rendered as the full list of partition keys.
    """
    is_time_windowed = isinstance(partition_subset, TimeWindowPartitionsSubset) and isinstance(
        partitions_def, TimeWindowPartitionsDefinition
    )
    if not is_time_windowed:
        return ", ".join(partition_subset.get_partition_keys())

    window_descriptions = []
    for window in partition_subset.included_time_windows:
        public_window = window.to_public_time_window()
        key_range = partitions_def.get_partition_key_range_for_time_window(
            public_window, respect_bounds=False
        )
        count = partitions_def.get_num_partitions_in_window(public_window)
        if count == 1:
            window_descriptions.append(f"1 partition: {key_range.start}")
        else:
            window_descriptions.append(
                f"{count} partitions: {key_range.start} -> {key_range.end}"
            )
    return ", ".join(window_descriptions)
def _asset_graph_subset_to_str(
    asset_graph_subset: AssetGraphSubset,
    asset_graph: BaseAssetGraph,
) -> str:
    """Format an asset graph subset as a newline-separated bulleted list, sorted by key.

    Partitioned assets are followed by a braced summary of their partitions; unpartitioned
    assets are listed by key alone.
    """
    lines = []
    for entity_subset in sorted(asset_graph_subset.iterate_asset_subsets(), key=lambda s: s.key):
        if not entity_subset.is_partitioned:
            lines.append(f"- {entity_subset.key.to_user_string()}")
            continue
        partitions_def = asset_graph.get(entity_subset.key).partitions_def
        subset_str = _partition_subset_str(entity_subset.subset_value, partitions_def)
        lines.append(f"- {entity_subset.key.to_user_string()}: {{{subset_str}}}")
    return "\n".join(lines)
def execute_asset_backfill_iteration_inner(
    backfill_id: str,
    asset_backfill_data: AssetBackfillData,
    asset_graph_view: AssetGraphView,
    backfill_start_timestamp: float,
    logger: logging.Logger,
    run_config: Optional[Mapping[str, Any]],
) -> AssetBackfillIterationResult:
    """Core logic of a backfill iteration. Has no side effects.

    Computes which runs should be requested, if any, as well as updated bookkeeping about
    the status of asset partitions targeted by the backfill. This is a thin wrapper that
    establishes a partition loading context and delegates to
    ``_execute_asset_backfill_iteration_inner``.
    """
    # ensures that all partition operations use the same effective_dt and share a dynamic partition cache
    with partition_loading_context(
        effective_dt=asset_graph_view.effective_dt,
        dynamic_partitions_store=asset_graph_view.get_inner_queryer_for_back_compat(),
    ):
        return _execute_asset_backfill_iteration_inner(
            backfill_id,
            asset_backfill_data,
            asset_graph_view,
            backfill_start_timestamp,
            logger,
            run_config,
        )
def _get_candidate_asset_graph_subset(
    asset_backfill_data: AssetBackfillData,
    asset_graph_view: AssetGraphView,
    materialized_asset_graph_subset: AssetGraphSubset,
    failed_asset_graph_subset: AssetGraphSubset,
) -> AssetGraphSubset:
    """Return the not-yet-requested targeted partitions of assets whose parents changed state.

    Collects every asset key that is a *child* of an asset that newly materialized or
    failed, then for each such key takes the targeted subset minus the already-requested
    subset. (Despite their names, ``parent_materialized_keys``/``parent_failed_keys`` hold
    the child keys of materialized/failed assets — i.e. the assets whose parents changed.)
    """
    materialized_keys = materialized_asset_graph_subset.asset_keys
    parent_materialized_keys = set().union(
        *(asset_graph_view.asset_graph.get(k).child_keys for k in materialized_keys)
    )

    failed_keys = failed_asset_graph_subset.asset_keys
    parent_failed_keys = set().union(
        *(asset_graph_view.asset_graph.get(k).child_keys for k in failed_keys)
    )

    child_subsets = []
    for asset_key in parent_materialized_keys | parent_failed_keys:
        # targeted minus already-requested: only fresh candidates for this key
        child_subsets.append(
            asset_graph_view.get_entity_subset_from_asset_graph_subset(
                asset_backfill_data.target_subset, asset_key
            ).compute_difference(
                asset_graph_view.get_entity_subset_from_asset_graph_subset(
                    asset_backfill_data.requested_subset, asset_key
                )
            )
        )

    return AssetGraphSubset.from_entity_subsets(child_subsets)
def _execute_asset_backfill_iteration_inner(
    backfill_id: str,
    asset_backfill_data: AssetBackfillData,
    asset_graph_view: AssetGraphView,
    backfill_start_timestamp: float,
    logger: logging.Logger,
    run_config: Optional[Mapping[str, Any]],
) -> AssetBackfillIterationResult:
    """Compute one iteration of an asset backfill without side effects.

    On the first iteration (no roots requested yet) the candidate subset is the set of
    target root assets; on subsequent iterations it is derived from partitions whose
    parents materialized or failed since the last tick. The candidates are then filtered
    via a BFS over the asset graph to determine which partitions can be requested now.

    Args:
        backfill_id: The id of the backfill being evaluated.
        asset_backfill_data: Bookkeeping state from the previous iteration.
        asset_graph_view: View of the asset graph at evaluation time.
        backfill_start_timestamp: Timestamp at which the backfill was launched.
        logger: Logger for iteration progress messages.
        run_config: Optional run config applied to every requested run.

    Returns:
        An AssetBackfillIterationResult containing the run requests, updated backfill
        data, and one reserved run id per run request.
    """
    instance_queryer = asset_graph_view.get_inner_queryer_for_back_compat()
    asset_graph: RemoteWorkspaceAssetGraph = cast(
        "RemoteWorkspaceAssetGraph", asset_graph_view.asset_graph
    )

    request_roots = not asset_backfill_data.requested_runs_for_target_roots
    if request_roots:
        # Fixed typo in this log message ("backill" -> "backfill").
        logger.info(
            "Not all root assets (assets in backfill that do not have parents in the backfill) have been requested, finding root assets."
        )

        target_roots = asset_backfill_data.get_target_root_asset_graph_subset(asset_graph_view)
        candidate_asset_graph_subset = target_roots

        logger.info(
            f"Root assets that have not yet been requested:\n{_asset_graph_subset_to_str(target_roots, asset_graph)}"
        )

        updated_materialized_subset = AssetGraphSubset()
        failed_and_downstream_subset = AssetGraphSubset()
        next_latest_storage_id = _get_next_latest_storage_id(instance_queryer)
    else:
        next_latest_storage_id = _get_next_latest_storage_id(instance_queryer)

        cursor_delay_time = int(os.getenv("ASSET_BACKFILL_CURSOR_DELAY_TIME", "0"))
        # Events are not guaranteed to be written to the event log in monotonic increasing order,
        # so we wait to ensure all events up until next_latest_storage_id have been written.
        if cursor_delay_time:
            time.sleep(cursor_delay_time)

        updated_materialized_subset = get_asset_backfill_iteration_materialized_subset(
            backfill_id, asset_backfill_data, asset_graph, instance_queryer
        )

        materialized_since_last_tick = (
            updated_materialized_subset - asset_backfill_data.materialized_subset
        )
        logger.info(
            f"Assets materialized since last tick:\n{_asset_graph_subset_to_str(materialized_since_last_tick, asset_graph)}"
            if not materialized_since_last_tick.is_empty
            else "No relevant assets materialized since last tick."
        )

        failed_asset_graph_subset = _get_failed_asset_graph_subset(
            asset_graph_view,
            backfill_id,
            updated_materialized_subset,
        )

        candidate_asset_graph_subset = _get_candidate_asset_graph_subset(
            asset_backfill_data,
            asset_graph_view,
            updated_materialized_subset,
            failed_asset_graph_subset,
        )

        failed_and_downstream_subset = _get_failed_and_downstream_asset_graph_subset(
            backfill_id,
            asset_backfill_data,
            asset_graph_view,
            updated_materialized_subset,
            failed_asset_graph_subset,
        )

    logger.info(
        f"Considering the following candidate subset:\n{_asset_graph_subset_to_str(candidate_asset_graph_subset, asset_graph)}"
        if not candidate_asset_graph_subset.is_empty
        else "Candidate subset is empty."
    )

    # BFS over the asset graph, filtering each candidate subset down to what can be
    # requested this tick; also collects (subset, reason) pairs for exclusions.
    asset_subset_to_request, not_requested_and_reasons = bfs_filter_asset_graph_view(
        asset_graph_view,
        lambda candidate_asset_graph_subset,
        visited: _should_backfill_atomic_asset_graph_subset_unit(
            asset_graph_view=asset_graph_view,
            candidate_asset_graph_subset_unit=candidate_asset_graph_subset,
            asset_graph_subset_matched_so_far=visited,
            materialized_subset=updated_materialized_subset,
            requested_subset=asset_backfill_data.requested_subset,
            target_subset=asset_backfill_data.target_subset,
            failed_and_downstream_subset=failed_and_downstream_subset,
            logger=logger,
        ),
        initial_asset_graph_subset=candidate_asset_graph_subset,
        include_full_execution_set=True,
        # Don't need to consider self-dependant child subsets since the full set that we care about is already included in the candidate subset
        traverse_self_dependent_assets=False,
    )

    logger.info(
        f"Asset partitions to request:\n{_asset_graph_subset_to_str(asset_subset_to_request, asset_graph)}"
        if not asset_subset_to_request.is_empty
        else "No asset partitions to request."
    )

    asset_partitions_to_request = set(asset_subset_to_request.iterate_asset_partitions())

    if len(not_requested_and_reasons) > 0:
        not_requested_str = "\n\n".join(
            [
                f"{_asset_graph_subset_to_str(asset_graph_subset, asset_graph)}\nReason: {reason}"
                for asset_graph_subset, reason in not_requested_and_reasons
            ]
        )
        logger.info(
            f"The following assets were considered for materialization but not requested:\n\n{not_requested_str}"
        )

    run_requests = [
        rr._replace(run_config=run_config)
        for rr in build_run_requests_with_backfill_policies(
            asset_partitions=asset_partitions_to_request,
            asset_graph=asset_graph,
            dynamic_partitions_store=instance_queryer,
        )
    ]

    if request_roots:
        check.invariant(
            len(run_requests) > 0,
            "At least one run should be requested on first backfill iteration",
        )

    updated_asset_backfill_data = AssetBackfillData(
        target_subset=asset_backfill_data.target_subset,
        latest_storage_id=next_latest_storage_id or asset_backfill_data.latest_storage_id,
        requested_runs_for_target_roots=asset_backfill_data.requested_runs_for_target_roots
        or request_roots,
        materialized_subset=updated_materialized_subset,
        failed_and_downstream_subset=failed_and_downstream_subset,
        requested_subset=asset_backfill_data.requested_subset,
        backfill_start_time=TimestampWithTimezone(backfill_start_timestamp, "UTC"),
    )
    return AssetBackfillIterationResult(
        run_requests,
        updated_asset_backfill_data,
        reserved_run_ids=[make_new_run_id() for _ in range(len(run_requests))],
    )
def _should_backfill_atomic_asset_subset_unit(
    asset_graph_view: AssetGraphView,
    entity_subset_to_filter: EntitySubset[AssetKey],
    candidate_asset_graph_subset_unit: AssetGraphSubset,
    asset_graph_subset_matched_so_far: AssetGraphSubset,
    target_subset: AssetGraphSubset,
    requested_subset: AssetGraphSubset,
    materialized_subset: AssetGraphSubset,
    failed_and_downstream_subset: AssetGraphSubset,
    logger: logging.Logger,
) -> tuple[SerializableEntitySubset[AssetKey], Iterable[tuple[EntitySubsetValue, str]]]:
    """Filter one asset's candidate subset down to the partitions requestable this tick.

    Successively removes partitions that are untargeted, already failed (or downstream of
    a failure), already materialized, or already requested, then removes partitions whose
    parents are not ready, and finally caps self-dependent assets at their backfill
    policy's max_partitions_per_run.

    Returns:
        A tuple of (surviving serializable subset, list of (excluded subset value,
        human-readable reason) pairs). Uninteresting exclusions (untargeted / already
        materialized / already requested / failed) carry no reason.
    """
    failure_subsets_with_reasons: list[tuple[EntitySubsetValue, str]] = []
    asset_graph = asset_graph_view.asset_graph
    asset_key = entity_subset_to_filter.key

    # Drop anything the backfill does not target at all.
    missing_in_target_partitions = entity_subset_to_filter.compute_difference(
        asset_graph_view.get_entity_subset_from_asset_graph_subset(target_subset, asset_key)
    )
    if not missing_in_target_partitions.is_empty:
        # Don't include a failure reason for this subset since it is unlikely to be
        # useful to know that an untargeted subset was not included
        entity_subset_to_filter = entity_subset_to_filter.compute_difference(
            missing_in_target_partitions
        )

    # Drop partitions that failed, or are downstream of a failure.
    failed_and_downstream_partitions = entity_subset_to_filter.compute_intersection(
        asset_graph_view.get_entity_subset_from_asset_graph_subset(
            failed_and_downstream_subset, asset_key
        )
    )
    if not failed_and_downstream_partitions.is_empty:
        # Similar to above, only include a failure reason for 'interesting' failure reasons
        entity_subset_to_filter = entity_subset_to_filter.compute_difference(
            failed_and_downstream_partitions
        )

    # Drop partitions this backfill already materialized.
    materialized_partitions = entity_subset_to_filter.compute_intersection(
        asset_graph_view.get_entity_subset_from_asset_graph_subset(materialized_subset, asset_key)
    )
    if not materialized_partitions.is_empty:
        # Similar to above, only include a failure reason for 'interesting' failure reasons
        entity_subset_to_filter = entity_subset_to_filter.compute_difference(
            materialized_partitions
        )

    # Drop partitions already requested by a previous iteration.
    requested_partitions = entity_subset_to_filter.compute_intersection(
        asset_graph_view.get_entity_subset_from_asset_graph_subset(requested_subset, asset_key)
    )
    if not requested_partitions.is_empty:
        # Similar to above, only include a failure reason for 'interesting' failure reasons
        entity_subset_to_filter = entity_subset_to_filter.compute_difference(requested_partitions)

    # True if any parent of this asset has already been matched (i.e. will be requested)
    # during this BFS pass.
    has_any_parent_being_requested_this_tick = any(
        not asset_graph_view.get_entity_subset_from_asset_graph_subset(
            asset_graph_subset_matched_so_far, parent_key
        ).is_empty
        for parent_key in asset_graph.get(asset_key).parent_keys
    )

    # Check each parent (in sorted order for determinism) for readiness.
    for parent_key in sorted(asset_graph.get(asset_key).parent_keys):
        if entity_subset_to_filter.is_empty:
            break

        parent_subset, required_but_nonexistent_subset = (
            asset_graph_view.compute_parent_subset_and_required_but_nonexistent_subset(
                parent_key,
                entity_subset_to_filter,
            )
        )

        if not required_but_nonexistent_subset.is_empty:
            raise DagsterInvariantViolationError(
                f"Asset partition subset {entity_subset_to_filter}"
                f" depends on non-existent partitions {required_but_nonexistent_subset}"
            )

        parent_materialized_subset = asset_graph_view.get_entity_subset_from_asset_graph_subset(
            materialized_subset, parent_key
        )

        # Children with parents that are targeted but not materialized are eligible
        # to be filtered out if the parent has not run yet
        targeted_but_not_materialized_parent_subset: EntitySubset[AssetKey] = (
            parent_subset.compute_intersection(
                asset_graph_view.get_entity_subset_from_asset_graph_subset(
                    target_subset, parent_key
                )
            )
        ).compute_difference(parent_materialized_subset)

        possibly_waiting_for_parent_subset = (
            asset_graph_view.compute_child_subset(
                asset_key, targeted_but_not_materialized_parent_subset
            )
        ).compute_intersection(entity_subset_to_filter)

        parent_being_requested_this_tick_subset = (
            asset_graph_view.get_entity_subset_from_asset_graph_subset(
                asset_graph_subset_matched_so_far, parent_key
            )
        )

        if not possibly_waiting_for_parent_subset.is_empty:
            cant_run_because_of_parent_reason = _get_cant_run_because_of_parent_reason(
                targeted_but_not_materialized_parent_subset,
                entity_subset_to_filter,
                asset_graph_view,
                target_subset,
                parent_being_requested_this_tick_subset,
                candidate_asset_graph_subset_unit,
                parent_materialized_subset,
                logger,
            )

            is_self_dependency = parent_key == asset_key

            if cant_run_because_of_parent_reason is not None:
                # if any parents are also being requested this tick and there is any reason to
                # believe that any parent can't be materialized with its child subset, then filter out
                # the whole child subset for now, to ensure that the parent and child aren't submitted
                # with different subsets which would incorrectly launch them in different runs
                # despite the child depending on the parent. Otherwise, we can just filter out the
                # specific ineligible child keys (to ensure that they aren't required before
                # their parents materialize)
                if not is_self_dependency and has_any_parent_being_requested_this_tick:
                    failure_subsets_with_reasons.append(
                        (
                            entity_subset_to_filter.get_internal_value(),
                            cant_run_because_of_parent_reason,
                        )
                    )
                    entity_subset_to_filter = asset_graph_view.get_empty_subset(
                        key=entity_subset_to_filter.key
                    )
                else:
                    entity_subset_to_filter = entity_subset_to_filter.compute_difference(
                        possibly_waiting_for_parent_subset
                    )
                    failure_subsets_with_reasons.append(
                        (
                            possibly_waiting_for_parent_subset.get_internal_value(),
                            cant_run_because_of_parent_reason,
                        )
                    )

            if is_self_dependency:
                self_dependent_node = asset_graph.get(asset_key)
                # ensure that we don't produce more than max_partitions_per_run partitions
                # if a backfill policy is set
                if (
                    self_dependent_node.backfill_policy is not None
                    and self_dependent_node.backfill_policy.max_partitions_per_run is not None
                ):
                    # only the first N partitions can be requested
                    num_allowed_partitions = (
                        self_dependent_node.backfill_policy.max_partitions_per_run
                    )
                    # TODO add a method for paginating through the keys in order
                    # and returning the first N instead of listing all of them
                    # (can't use expensively_compute_asset_partitions because it returns
                    # an unordered set)
                    internal_value = entity_subset_to_filter.get_internal_value()
                    partition_keys_to_include = (
                        list(internal_value.get_partition_keys())
                        if isinstance(internal_value, PartitionsSubset)
                        else [None]
                    )[:num_allowed_partitions]
                    partition_subset_to_include = AssetGraphSubset.from_asset_partition_set(
                        {
                            AssetKeyPartitionKey(self_dependent_node.key, partition_key)
                            for partition_key in partition_keys_to_include
                        },
                        asset_graph=asset_graph,
                    )
                    entity_subset_to_include = (
                        asset_graph_view.get_entity_subset_from_asset_graph_subset(
                            partition_subset_to_include, self_dependent_node.key
                        )
                    )
                    entity_subset_to_reject = entity_subset_to_filter.compute_difference(
                        entity_subset_to_include
                    )
                    if not entity_subset_to_reject.is_empty:
                        failure_subsets_with_reasons.append(
                            (
                                entity_subset_to_reject.get_internal_value(),
                                "Respecting the maximum number of partitions per run for the backfill policy of a self-dependant asset",
                            )
                        )

                    entity_subset_to_filter = entity_subset_to_include

    return (
        entity_subset_to_filter.convert_to_serializable_subset(),
        failure_subsets_with_reasons,
    )
def _get_cant_run_because_of_parent_reason(
    parent_subset: EntitySubset[AssetKey],
    entity_subset_to_filter: EntitySubset[AssetKey],
    asset_graph_view: AssetGraphView,
    target_subset: AssetGraphSubset,
    parent_being_requested_this_tick_subset: EntitySubset[AssetKey],
    candidate_asset_graph_subset_unit: AssetGraphSubset,
    parent_materialized_subset: EntitySubset[AssetKey],
    logger: logging.Logger,
) -> Optional[str]:
    """Return a human-readable reason the candidate subset cannot run given this parent,
    or None if the parent does not block it.

    The checks, in order: the parent must be requesting the same partitions as the
    child (or be part of the same execution unit); both must share backfill policy,
    code location, and partitions definition to be groupable into one run; self-dependent
    assets have additional constraints; and non-identity partition mappings require the
    parent's backfill policy to cover all requested parent partitions in a single run.

    NOTE(review): ``logger`` is unused in this body; presumably kept for interface
    parity with the caller — confirm before removing.
    """
    candidate_asset_key = entity_subset_to_filter.key
    parent_asset_key = parent_subset.key

    assert isinstance(asset_graph_view.asset_graph, RemoteWorkspaceAssetGraph)
    asset_graph = cast("RemoteWorkspaceAssetGraph", asset_graph_view.asset_graph)

    parent_node = asset_graph.get(parent_asset_key)
    candidate_node = asset_graph.get(candidate_asset_key)
    partition_mapping = asset_graph.get_partition_mapping(
        candidate_asset_key, parent_asset_key=parent_asset_key
    )

    is_self_dependency = parent_asset_key == candidate_asset_key

    # first handle the common case where the parent hasn't even been materialized yet, or is
    # currently being materialized but not requesting the right partitions
    if not (
        # this check is here to guard against cases where the parent asset has a superset of
        # the child asset's asset partitions, which will mean that the runs that would be created
        # would not combine the parent and child assets into a single run. this is not relevant
        # for self-dependencies, because the parent and child are the same asset.
        is_self_dependency
        or (
            # in the typical case, we will only allow this candidate subset to be requested if
            # it contains exactly the same partitions as its parent asset for this evaluation,
            # otherwise they may end up in different runs
            parent_being_requested_this_tick_subset.get_internal_value()
            == entity_subset_to_filter.get_internal_value()
        )
        or (
            # for non-subsettable multi-assets, we will not have yet requested the parent asset
            # partitions, so we just check that we have a matching set of partitions
            asset_graph_view.get_entity_subset_from_asset_graph_subset(
                candidate_asset_graph_subset_unit, parent_asset_key
            ).get_internal_value()
            == entity_subset_to_filter.get_internal_value()
        )
    ):
        if (
            len(candidate_asset_graph_subset_unit.asset_keys) == 1
            and parent_being_requested_this_tick_subset.is_empty
        ):
            return f"Waiting for parent {parent_node.key.to_user_string()} to be materialized."

        return (
            f"parent {parent_node.key.to_user_string()} is requesting a different set of partitions from "
            f"{candidate_node.key.to_user_string()}, meaning they cannot be grouped together in the same run."
        )

    # Then filter out cases where even if the parent was requested this iteration, it wouldn't
    # matter, because the parent and child can't execute in the same run

    # checks if there is a simple partition mapping between the parent and the child
    has_identity_partition_mapping = (
        # both unpartitioned
        (not candidate_node.is_partitioned and not parent_node.is_partitioned)
        # normal identity partition mapping
        or isinstance(partition_mapping, IdentityPartitionMapping)
        # for assets with the same time partitions definition, a non-offset partition
        # mapping functions as an identity partition mapping
        or (
            isinstance(partition_mapping, TimeWindowPartitionMapping)
            and partition_mapping.start_offset == 0
            and partition_mapping.end_offset == 0
        )
    )

    if parent_node.backfill_policy != candidate_node.backfill_policy:
        return f"parent {parent_node.key.to_user_string()} and {candidate_node.key.to_user_string()} have different backfill policies so they cannot be materialized in the same run. {candidate_node.key.to_user_string()} can be materialized once {parent_node.key} is materialized."

    if (
        parent_node.resolve_to_singular_repo_scoped_node().repository_handle
        != candidate_node.resolve_to_singular_repo_scoped_node().repository_handle
    ):
        return f"parent {parent_node.key.to_user_string()} and {candidate_node.key.to_user_string()} are in different code locations so they cannot be materialized in the same run. {candidate_node.key.to_user_string()} can be materialized once {parent_node.key.to_user_string()} is materialized."

    if parent_node.partitions_def != candidate_node.partitions_def:
        return f"parent {parent_node.key.to_user_string()} and {candidate_node.key.to_user_string()} have different partitions definitions so they cannot be materialized in the same run. {candidate_node.key.to_user_string()} can be materialized once {parent_node.key.to_user_string()} is materialized."

    parent_target_subset = target_subset.get_asset_subset(parent_asset_key, asset_graph)
    candidate_target_subset = target_subset.get_asset_subset(candidate_asset_key, asset_graph)

    num_parent_partitions_being_requested_this_tick = parent_being_requested_this_tick_subset.size

    has_self_dependency = any(
        parent_key == candidate_asset_key for parent_key in candidate_node.parent_keys
    )
    # launching a self-dependent asset with a non-self-dependent asset can result in invalid
    # runs being launched that don't respect lineage
    if (
        has_self_dependency
        and parent_asset_key not in candidate_asset_graph_subset_unit.asset_keys
        and num_parent_partitions_being_requested_this_tick > 0
    ):
        return "Self-dependant assets cannot be materialized in the same run as other assets."

    if is_self_dependency:
        if parent_node.backfill_policy is None:
            required_parent_subset = parent_subset
        else:
            # with a self dependency, all of its parent partitions need to either have already
            # been materialized or be in the candidate subset
            required_parent_subset = parent_subset.compute_difference(
                entity_subset_to_filter
            ).compute_difference(parent_materialized_subset)

        if not required_parent_subset.is_empty:
            return f"Waiting for the following parent partitions of a self-dependant asset to materialize: {_partition_subset_str(required_parent_subset.get_internal_subset_value(), check.not_none(parent_node.partitions_def))}"
        else:
            return None

    if not (
        # if there is a simple mapping between the parent and the child, the child can be
        # grouped into the same run as the parent
        has_identity_partition_mapping
        # if there is not a simple mapping, we can only materialize this asset with its
        # parent if...
        or (
            # there is a backfill policy for the parent
            parent_node.backfill_policy is not None
            # the same subset of parents is targeted as the child
            and parent_target_subset.value == candidate_target_subset.value
            and (
                # there is no limit on the size of a single run or...
                parent_node.backfill_policy.max_partitions_per_run is None
                # a single run can materialize all requested parent partitions
                or parent_node.backfill_policy.max_partitions_per_run
                > num_parent_partitions_being_requested_this_tick
            )
            # all targeted parents are being requested this tick
            and num_parent_partitions_being_requested_this_tick == parent_target_subset.size
        )
    ):
        failed_reason = (
            f"partition mapping between {parent_node.key.to_user_string()} and {candidate_node.key.to_user_string()} is not simple and "
            f"{parent_node.key.to_user_string()} does not meet requirements of: targeting the same partitions as "
            f"{candidate_node.key.to_user_string()}, have all of its partitions requested in this iteration, having "
            "a backfill policy, and that backfill policy size limit is not exceeded by adding "
            f"{candidate_node.key.to_user_string()} to the run. {candidate_node.key.to_user_string()} can be materialized once {parent_node.key.to_user_string()} is materialized."
        )
        return failed_reason

    return None
def _should_backfill_atomic_asset_graph_subset_unit(
asset_graph_view: AssetGraphView,
candidate_asset_graph_subset_unit: AssetGraphSubset,
asset_graph_subset_matched_so_far: AssetGraphSubset,
target_subset: AssetGraphSubset,
requested_subset: AssetGraphSubset,
materialized_subset: AssetGraphSubset,
failed_and_downstream_subset: AssetGraphSubset,
logger: logging.Logger,
) -> AssetGraphViewBfsFilterConditionResult:
failure_subset_values_with_reasons: list[tuple[EntitySubsetValue, str]] = []
candidate_entity_subsets = list(
asset_graph_view.iterate_asset_subsets(candidate_asset_graph_subset_unit)
)
# this value is the same for all passed in asset keys since they are always part of the same
# execution set
passed_subset_value = candidate_entity_subsets[0].get_internal_value()
candidate_asset_keys = [
candidate_entity_subset.key for candidate_entity_subset in candidate_entity_subsets
]
for candidate_asset_key in candidate_asset_keys:
# filter down the set of matching values for each asset key
passed_serializable_entity_subset = SerializableEntitySubset(
candidate_asset_key,
passed_subset_value,
)
entity_subset_to_filter = check.not_none(
asset_graph_view.get_subset_from_serializable_subset(passed_serializable_entity_subset)
)
if entity_subset_to_filter.is_empty:
break
entity_subset_to_filter, new_failure_subset_values_with_reasons = (
_should_backfill_atomic_asset_subset_unit(
asset_graph_view,
entity_subset_to_filter=entity_subset_to_filter,
candidate_asset_graph_subset_unit=candidate_asset_graph_subset_unit,
asset_graph_subset_matched_so_far=asset_graph_subset_matched_so_far,
target_subset=target_subset,
requested_subset=requested_subset,
materialized_subset=materialized_subset,
failed_and_downstream_subset=failed_and_downstream_subset,
logger=logger,
)
)
passed_subset_value = entity_subset_to_filter.value
failure_subset_values_with_reasons.extend(new_failure_subset_values_with_reasons)
passed_entity_subsets = []
for candidate_entity_subset in candidate_entity_subsets:
passed_entity_subsets.append(
check.not_none(
asset_graph_view.get_subset_from_serializable_subset(
SerializableEntitySubset(candidate_entity_subset.key, passed_subset_value)
)
)
)
failure_asset_graph_subsets_with_reasons = []
# Any failure partition values apply to all candidate asset keys, so construct a subset
# graph with that partition subset value for each key
for failure_subset_value, reason in failure_subset_values_with_reasons:
failure_entity_subsets = [
check.not_none(
asset_graph_view.get_subset_from_serializable_subset(
SerializableEntitySubset(candidate_entity_subset.key, failure_subset_value)
)
)
for candidate_entity_subset in candidate_entity_subsets
]
failure_asset_graph_subsets_with_reasons.append(
(
AssetGraphSubset.from_entity_subsets(
entity_subsets=failure_entity_subsets,
),
reason,
)
)
return AssetGraphViewBfsFilterConditionResult(
passed_asset_graph_subset=AssetGraphSubset.from_entity_subsets(passed_entity_subsets),
excluded_asset_graph_subsets_and_reasons=failure_asset_graph_subsets_with_reasons,
)
def _get_failed_asset_graph_subset(
asset_graph_view: AssetGraphView,
backfill_id: str,
materialized_subset: AssetGraphSubset,
) -> AssetGraphSubset:
"""Returns asset subset that materializations were requested for as part of the backfill, but were
not successfully materialized.
This function gets a list of all runs for the backfill that have failed and extracts the asset partitions
that were not materialized from those runs. However, we need to account for retried runs. If a run was
successfully retried, the original failed run will still be processed in this function. So we check the
failed asset partitions against the list of successfully materialized asset partitions. If an asset partition
is in the materialized_subset, it means the failed run was retried and the asset partition was materialized.
Includes canceled asset partitions. Implementation assumes that successful runs won't have any
failed partitions.
"""
instance_queryer = asset_graph_view.get_inner_queryer_for_back_compat()
runs = instance_queryer.instance.get_runs(
filters=RunsFilter(
tags={BACKFILL_ID_TAG: backfill_id},
statuses=[DagsterRunStatus.CANCELED, DagsterRunStatus.FAILURE],
)
)
result: AssetGraphSubset = AssetGraphSubset.create_empty_subset()
for run in runs:
planned_asset_keys = instance_queryer.get_planned_materializations_for_run(
run_id=run.run_id
)
completed_asset_keys = instance_queryer.get_current_materializations_for_run(
run_id=run.run_id
)
failed_asset_keys = planned_asset_keys - completed_asset_keys
if (
run.tags.get(ASSET_PARTITION_RANGE_START_TAG)
and run.tags.get(ASSET_PARTITION_RANGE_END_TAG)
and run.tags.get(PARTITION_NAME_TAG) is None
):
# reconstruct the partition keys from a chunked backfill run
partition_range = PartitionKeyRange(
start=run.tags[ASSET_PARTITION_RANGE_START_TAG],
end=run.tags[ASSET_PARTITION_RANGE_END_TAG],
)
candidate_subset = AssetGraphSubset.from_entity_subsets(
[
asset_graph_view.get_entity_subset_in_range(asset_key, partition_range)
for asset_key in failed_asset_keys
]
)
else:
# a regular backfill run that run on a single partition
partition_key = run.tags.get(PARTITION_NAME_TAG)
candidate_subset = AssetGraphSubset.from_asset_partition_set(
{AssetKeyPartitionKey(asset_key, partition_key) for asset_key in failed_asset_keys},
asset_graph_view.asset_graph,
)
asset_subset_still_failed = candidate_subset - materialized_subset
result = result | asset_subset_still_failed
return result
| AssetBackfillIterationResult |
python | kamyu104__LeetCode-Solutions | Python/minimum-flips-to-make-a-or-b-equal-to-c.py | {
"start": 30,
"end": 450
} | class ____(object):
def minFlips(self, a, b, c):
"""
:type a: int
:type b: int
:type c: int
:rtype: int
"""
def number_of_1_bits(n):
result = 0
while n:
n &= n-1
result += 1
return result
return number_of_1_bits((a|b)^c) + number_of_1_bits(a&b&~c)
# Time: O(31)
# Space: O(1)
| Solution |
python | realpython__materials | flask-connexion-rest-part-4/models.py | {
"start": 1476,
"end": 1728
} | class ____(ma.ModelSchema):
def __init__(self, **kwargs):
super().__init__(strict=True, **kwargs)
class Meta:
model = Note
sqla_session = db.session
person = fields.Nested("NotePersonSchema", default=None)
| NoteSchema |
python | google__flatbuffers | python/flatbuffers/number_types.py | {
"start": 1846,
"end": 1997
} | class ____(object):
bytewidth = 2
min_val = -(2**15)
max_val = (2**15) - 1
py_type = int
name = "int16"
packer_type = packer.int16
| Int16Flags |
python | django__django | tests/expressions/models.py | {
"start": 2237,
"end": 2444
} | class ____(models.Model):
experiment = models.ForeignKey(Experiment, models.CASCADE)
result_time = models.DateTimeField()
def __str__(self):
return "Result at %s" % self.result_time
| Result |
python | getsentry__sentry | src/sentry/sentry_apps/models/sentry_app_component.py | {
"start": 428,
"end": 1438
} | class ____(Model):
__relocation_scope__ = RelocationScope.Global
uuid = UUIDField(unique=True, auto_add=True)
sentry_app = FlexibleForeignKey("sentry.SentryApp", related_name="components")
type = models.CharField(max_length=64)
schema = models.JSONField()
class Meta:
app_label = "sentry"
db_table = "sentry_sentryappcomponent"
@property
def app_schema(self) -> MutableMapping[str, Any]:
"""Provides consistent interface with RpcSentryAppComponent"""
return self.schema
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_string(json, SanitizableField(model_name, "type"))
sanitizer.set_json(json, SanitizableField(model_name, "schema"), {})
| SentryAppComponent |
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 7369,
"end": 7489
} | class ____:
"""
A dictionary key of a type that we cannot or do not check for duplicates.
"""
| UnhandledKeyType |
python | numpy__numpy | numpy/_core/tests/test_indexing.py | {
"start": 49672,
"end": 52190
} | class ____:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
assert_raises(TypeError, np.reshape, a, (np.bool(True), -1))
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
assert_raises(TypeError, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
assert a[False, True, ...].shape == (0, 2, 3, 4)
assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_fast_path(self):
# These used to either give the wrong error, or incorrectly give no
# error.
a = np.ones((3, 3))
# This used to incorrectly work (and give an array of shape (0,))
idx1 = np.array([[False] * 9])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along axis 0; "
"size of axis is 3 but size of corresponding boolean axis is 1",
lambda: a[idx1])
# This used to incorrectly give a ValueError: operands could not be
# broadcast together
idx2 = np.array([[False] * 8 + [True]])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along axis 0; "
"size of axis is 3 but size of corresponding boolean axis is 1",
lambda: a[idx2])
# This is the same as it used to be. The above two should work like this.
idx3 = np.array([[False] * 10])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along axis 0; "
"size of axis is 3 but size of corresponding boolean axis is 1",
lambda: a[idx3])
# This used to give ValueError: non-broadcastable operand
a = np.ones((1, 1, 2))
idx = np.array([[[True], [False]]])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along axis 1; "
"size of axis is 1 but size of corresponding boolean axis is 2",
lambda: a[idx])
| TestBooleanIndexing |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/numpy_util_test.py | {
"start": 1100,
"end": 4903
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
global_ids = test_util.create_device_ids_array((2, 2))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = {
device: layout.Mesh([_MESH_DIM_X, _MESH_DIM_Y], global_ids, local_ids,
test_util.create_device_list((2, 2), device))
for device in ('CPU', 'GPU', 'TPU')
}
self.mesh = self.configTestMesh(mesh_dict)
def test_tensor_from_replicated(self):
tensors = [np.arange(4) for i in range(self.mesh.size)]
replicated_layout = layout.Layout([layout.UNSHARDED, layout.UNSHARDED],
mesh=self.mesh)
self.assertAllClose(
np.arange(4), numpy_util.unpacked_to_numpy(tensors, replicated_layout))
def test_tensor_x_sharded(self):
t00 = np.arange(8).reshape(2, 4)
t01 = np.arange(8).reshape(2, 4)
t10 = np.arange(8, 16).reshape(2, 4)
t11 = np.arange(8, 16).reshape(2, 4)
tensors = [t00, t01, t10, t11]
sharded_on_x = layout.Layout([_MESH_DIM_X, layout.UNSHARDED],
mesh=self.mesh)
self.assertAllClose(
np.arange(16).reshape(4, 4),
numpy_util.unpacked_to_numpy(tensors, sharded_on_x))
def test_tensor_y_sharded(self):
# [[0,1], [4,5], [8,9], [12,13]]
t00 = np.arange(16).reshape(4, 4)[:, :-2]
# [[2,3], [6,7], [10,11], [14,15]]
t01 = np.arange(16).reshape(4, 4)[:, 2:4]
t10 = np.arange(16).reshape(4, 4)[:, :-2]
t11 = np.arange(16).reshape(4, 4)[:, 2:4]
tensors = [t00, t01, t10, t11]
sharded_on_y = layout.Layout([layout.UNSHARDED, _MESH_DIM_Y],
mesh=self.mesh)
self.assertAllClose(
numpy_util.unpacked_to_numpy(tensors, sharded_on_y),
np.arange(16).reshape(4, 4))
def test_tensor_x_sharded_on_mesh_y(self):
t00 = np.arange(8).reshape(2, 4)
t01 = np.arange(8, 16).reshape(2, 4)
t10 = np.arange(8).reshape(2, 4)
t11 = np.arange(8, 16).reshape(2, 4)
tensors = [t00, t01, t10, t11]
sharded_on_y = layout.Layout([_MESH_DIM_Y, layout.UNSHARDED],
mesh=self.mesh)
self.assertAllClose(
numpy_util.unpacked_to_numpy(tensors, sharded_on_y),
np.arange(16).reshape(4, 4))
def test_tensor_y_sharded_on_mesh_x(self):
# [[0,1], [4,5], [8,9], [12,13]]
t00 = np.arange(16).reshape(4, 4)[:, :-2]
t01 = np.arange(16).reshape(4, 4)[:, :-2]
# [[2,3], [6,7], [10,11], [14,15]]
t10 = np.arange(16).reshape(4, 4)[:, 2:4]
t11 = np.arange(16).reshape(4, 4)[:, 2:4]
tensors = [t00, t01, t10, t11]
sharded_on_x = layout.Layout([layout.UNSHARDED, _MESH_DIM_X],
mesh=self.mesh)
self.assertAllClose(
numpy_util.unpacked_to_numpy(tensors, sharded_on_x),
np.arange(16).reshape(4, 4))
def test_tensor_x_y_sharded_x_y(self):
t00 = np.array([[0, 1], [4, 5]])
t01 = np.array([[2, 3], [6, 7]])
t10 = np.array([[8, 9], [12, 13]])
t11 = np.array([[10, 11], [14, 15]])
tensors = [t00, t01, t10, t11]
sharded_on_x_y = layout.Layout([_MESH_DIM_X, _MESH_DIM_Y], mesh=self.mesh)
self.assertAllClose(
numpy_util.unpacked_to_numpy(tensors, sharded_on_x_y),
np.arange(16).reshape(4, 4))
def test_tensor_x_y_sharded_y_x(self):
t00 = np.array([[0, 1], [4, 5]])
t01 = np.array([[8, 9], [12, 13]])
t10 = np.array([[2, 3], [6, 7]])
t11 = np.array([[10, 11], [14, 15]])
tensors = [t00, t01, t10, t11]
sharded_on_y_x = layout.Layout([_MESH_DIM_Y, _MESH_DIM_X], mesh=self.mesh)
self.assertAllClose(
numpy_util.unpacked_to_numpy(tensors, sharded_on_y_x),
np.arange(16).reshape(4, 4))
if __name__ == '__main__':
tf_test.main()
| NumpyUtilTest |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_alphabetical.py | {
"start": 789,
"end": 2323
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see {some doc} for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.are_alphabetical"
condition_value_keys = ("reverse",)
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, reverse=False, **kwargs):
# lowercase the whole column to avoid issues with capitalization
# (since every capital letter is "before" the lowercase letters)
column_lower = column.map(str.lower)
column_length = column.size
# choose the operator to use for comparison of consecutive items
# could be easily adapted for other comparisons, perhaps of custom objects
if reverse:
compare_function = operator.ge
else:
compare_function = operator.le
output = [True] # first value is automatically in order
for i in range(1, column_length):
if column_lower[i] and column_lower[i - 1]: # make sure we aren't comparing Nones
output.append(compare_function(column_lower[i - 1], column_lower[i]))
else:
output.append(None)
return pandas.Series(output)
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| ColumnValuesAreAlphabetical |
python | doocs__leetcode | lcof2/剑指 Offer II 112. 最长递增路径/Solution.py | {
"start": 0,
"end": 500
} | class ____:
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
@cache
def dfs(i, j):
ans = 1
for a, b in [[-1, 0], [1, 0], [0, 1], [0, -1]]:
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and matrix[x][y] > matrix[i][j]:
ans = max(ans, dfs(x, y) + 1)
return ans
m, n = len(matrix), len(matrix[0])
return max(dfs(i, j) for i in range(m) for j in range(n))
| Solution |
python | doocs__leetcode | solution/0200-0299/0279.Perfect Squares/Solution.py | {
"start": 0,
"end": 379
} | class ____:
def numSquares(self, n: int) -> int:
m = int(sqrt(n))
f = [[inf] * (n + 1) for _ in range(m + 1)]
f[0][0] = 0
for i in range(1, m + 1):
for j in range(n + 1):
f[i][j] = f[i - 1][j]
if j >= i * i:
f[i][j] = min(f[i][j], f[i][j - i * i] + 1)
return f[m][n]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-the-safest-path-in-a-grid.py | {
"start": 4425,
"end": 6245
} | class ____(object):
def maximumSafenessFactor(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
def bfs():
dist = [[0 if grid[r][c] == 1 else -1 for c in xrange(len(grid[0]))] for r in xrange(len(grid))]
q = [(r, c) for r in xrange(len(grid)) for c in xrange(len(grid[0])) if grid[r][c]]
d = 0
while q:
new_q = []
for r, c in q:
for dr, dc in DIRECTIONS:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(dist) and 0 <= nc < len(dist[0]) and dist[nr][nc] == -1):
continue
dist[nr][nc] = d+1
new_q.append((nr, nc))
q = new_q
d += 1
return dist
def check(x):
lookup = [[False]*len(dist[0]) for _ in xrange(len(dist))]
q = [(0, 0)]
lookup[0][0] = True
while q:
new_q = []
for r, c in q:
for dr, dc in DIRECTIONS:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(dist) and 0 <= nc < len(dist[0]) and dist[nr][nc] >= x and not lookup[nr][nc]):
continue
lookup[nr][nc] = True
new_q.append((nr, nc))
q = new_q
return lookup[-1][-1]
dist = bfs()
left, right = 0, dist[0][0]
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
| Solution3 |
python | pandas-dev__pandas | pandas/core/arrays/integer.py | {
"start": 2016,
"end": 5130
} | class ____(NumericArray):
"""
Array of integer (optional missing) values.
Uses :attr:`pandas.NA` as the missing value.
.. warning::
IntegerArray is currently experimental, and its API or internal
implementation may change without warning.
We represent an IntegerArray with 2 numpy arrays:
- data: contains a numpy integer array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an IntegerArray from generic array-like input, use
:func:`pandas.array` with one of the integer dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d integer-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
IntegerArray
See Also
--------
array : Create an array using the appropriate dtype, including ``IntegerArray``.
Int32Dtype : An ExtensionDtype for int32 integer data.
UInt16Dtype : An ExtensionDtype for uint16 integer data.
Examples
--------
Create an IntegerArray with :func:`pandas.array`.
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
>>> int_array
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([1, None, 3], dtype="Int32")
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
>>> pd.array([1, None, 3], dtype="UInt16")
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: UInt16
"""
_dtype_cls = IntegerDtype
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
See Also
--------
Int8Dtype : 8-bit nullable integer type.
Int16Dtype : 16-bit nullable integer type.
Int32Dtype : 32-bit nullable integer type.
Int64Dtype : 64-bit nullable integer type.
Examples
--------
For Int8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
>>> ser.dtype
Int8Dtype()
For Int16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
>>> ser.dtype
Int16Dtype()
For Int32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
>>> ser.dtype
Int32Dtype()
For Int64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
>>> ser.dtype
Int64Dtype()
For UInt8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
>>> ser.dtype
UInt8Dtype()
For UInt16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
>>> ser.dtype
UInt16Dtype()
For UInt32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
>>> ser.dtype
UInt32Dtype()
For UInt64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
>>> ser.dtype
UInt64Dtype()
"""
# create the Dtype
@register_extension_dtype
@set_module("pandas")
| IntegerArray |
python | neetcode-gh__leetcode | python/0658-find-k-closest-elements.py | {
"start": 49,
"end": 953
} | class ____:
def findClosestElements(self, arr: List[int], k: int, x: int) -> List[int]:
l, r = 0, len(arr) - 1
# Find index of x or the closest val to x
val, idx = arr[0], 0
while l <= r:
m = (l + r) // 2
curDiff, resDiff = abs(arr[m] - x), abs(val - x)
if curDiff < resDiff or (curDiff == resDiff and arr[m] < val):
val, idx = arr[m], m
if arr[m] < x:
l = m + 1
elif arr[m] > x:
r = m - 1
else:
break
l = r = idx
for i in range(k - 1):
if l == 0:
r += 1
elif r == len(arr) - 1 or x - arr[l - 1] <= arr[r + 1] - x:
l -= 1
else:
r += 1
return arr[l : r + 1]
# Log(n-k) + k
# Elegant but very difficult to understand
| Solution |
python | chroma-core__chroma | chromadb/api/collection_configuration.py | {
"start": 401,
"end": 626
} | class ____(TypedDict, total=False):
space: Space
ef_construction: int
max_neighbors: int
ef_search: int
num_threads: int
batch_size: int
sync_threshold: int
resize_factor: float
| HNSWConfiguration |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 120552,
"end": 121026
} | class ____(BigBirdPegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = BigBirdPegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
| BigBirdPegasusDecoderWrapper |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/list1.py | {
"start": 520,
"end": 668
} | class ____(Generic[_T]):
def __get__(self, instance: Any, owner: Any) -> _T: ...
def __set__(self, instance: Any, value: _T) -> None: ...
| Baz |
python | django-guardian__django-guardian | guardian/testapp/migrations/0004_childtestmodel_parenttestmodel.py | {
"start": 126,
"end": 1173
} | class ____(migrations.Migration):
dependencies = [
("testapp", "0003_auto_20190611_0440"),
]
operations = [
migrations.CreateModel(
name="ParentTestModel",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_on", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name="ChildTestModel",
fields=[
(
"parent_id",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="testapp.ParentTestModel",
),
),
("name", models.CharField(max_length=31)),
],
bases=("testapp.parenttestmodel",),
),
]
| Migration |
python | pytorch__pytorch | test/mobile/lightweight_dispatch/tests_setup.py | {
"start": 1129,
"end": 1438
} | class ____(torch.nn.Module):
def forward(self, index):
a = torch.zeros(2, 2)
a[0][1] = 1
a[1][0] = 2
a[1][1] = 3
return a[index]
# gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
@save_model
| ModelWithTensorOptional |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 9531,
"end": 10177
} | class ____:
def __init__(self, length=64, seed=42, batch_size=8):
self.length = length
np.random.seed(seed)
sizes = np.random.randint(1, 20, (length // batch_size,))
# For easy batching, we make every batch_size consecutive samples the same size.
self.xs = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)]
self.ys = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)]
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_x": self.xs[i], "labels": self.ys[i]}
| DynamicShapesDataset |
python | kubernetes-client__python | kubernetes/client/models/v1_git_repo_volume_source.py | {
"start": 383,
"end": 5828
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'directory': 'str',
'repository': 'str',
'revision': 'str'
}
attribute_map = {
'directory': 'directory',
'repository': 'repository',
'revision': 'revision'
}
def __init__(self, directory=None, repository=None, revision=None, local_vars_configuration=None): # noqa: E501
"""V1GitRepoVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._directory = None
self._repository = None
self._revision = None
self.discriminator = None
if directory is not None:
self.directory = directory
self.repository = repository
if revision is not None:
self.revision = revision
@property
def directory(self):
"""Gets the directory of this V1GitRepoVolumeSource. # noqa: E501
directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. # noqa: E501
:return: The directory of this V1GitRepoVolumeSource. # noqa: E501
:rtype: str
"""
return self._directory
@directory.setter
def directory(self, directory):
"""Sets the directory of this V1GitRepoVolumeSource.
directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. # noqa: E501
:param directory: The directory of this V1GitRepoVolumeSource. # noqa: E501
:type: str
"""
self._directory = directory
@property
def repository(self):
"""Gets the repository of this V1GitRepoVolumeSource. # noqa: E501
repository is the URL # noqa: E501
:return: The repository of this V1GitRepoVolumeSource. # noqa: E501
:rtype: str
"""
return self._repository
@repository.setter
def repository(self, repository):
"""Sets the repository of this V1GitRepoVolumeSource.
repository is the URL # noqa: E501
:param repository: The repository of this V1GitRepoVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and repository is None: # noqa: E501
raise ValueError("Invalid value for `repository`, must not be `None`") # noqa: E501
self._repository = repository
@property
def revision(self):
"""Gets the revision of this V1GitRepoVolumeSource. # noqa: E501
revision is the commit hash for the specified revision. # noqa: E501
:return: The revision of this V1GitRepoVolumeSource. # noqa: E501
:rtype: str
"""
return self._revision
@revision.setter
def revision(self, revision):
"""Sets the revision of this V1GitRepoVolumeSource.
revision is the commit hash for the specified revision. # noqa: E501
:param revision: The revision of this V1GitRepoVolumeSource. # noqa: E501
:type: str
"""
self._revision = revision
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GitRepoVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GitRepoVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1GitRepoVolumeSource |
python | pypa__setuptools | pkg_resources/tests/test_pkg_resources.py | {
"start": 15221,
"end": 17108
} | class ____:
def fake_site_packages(self, tmp_path, monkeypatch, dist_files):
site_packages = tmp_path / "site-packages"
site_packages.mkdir()
for file, content in self.FILES.items():
path = site_packages / file
path.parent.mkdir(exist_ok=True, parents=True)
path.write_text(inspect.cleandoc(content), encoding="utf-8")
monkeypatch.setattr(sys, "path", [site_packages])
return os.fspath(site_packages)
FILES = {
"pkg1_mod-1.2.3.dist-info/METADATA": """
Metadata-Version: 2.4
Name: pkg1.mod
Version: 1.2.3
""",
"pkg2.mod-0.42.dist-info/METADATA": """
Metadata-Version: 2.1
Name: pkg2.mod
Version: 0.42
""",
"pkg3_mod.egg-info/PKG-INFO": """
Name: pkg3.mod
Version: 1.2.3.4
""",
"pkg4.mod.egg-info/PKG-INFO": """
Name: pkg4.mod
Version: 0.42.1
""",
}
@pytest.mark.parametrize(
("version", "requirement"),
[
("1.2.3", "pkg1.mod>=1"),
("0.42", "pkg2.mod>=0.4"),
("1.2.3.4", "pkg3.mod<=2"),
("0.42.1", "pkg4.mod>0.2,<1"),
],
)
def test_require_non_normalised_name(
self, tmp_path, monkeypatch, version, requirement
):
# https://github.com/pypa/setuptools/issues/4853
site_packages = self.fake_site_packages(tmp_path, monkeypatch, self.FILES)
ws = pkg_resources.WorkingSet([site_packages])
for req in [requirement, requirement.replace(".", "-")]:
[dist] = ws.require(req)
assert dist.version == version
assert os.path.samefile(
os.path.commonpath([dist.location, site_packages]), site_packages
)
| TestWorkdirRequire |
python | fsspec__filesystem_spec | fsspec/implementations/dbfs.py | {
"start": 255,
"end": 607
} | class ____(Exception):
"""
Helper class for exceptions raised in this module.
"""
def __init__(self, error_code, message, details=None):
"""Create a new DatabricksException"""
super().__init__(message)
self.error_code = error_code
self.message = message
self.details = details
| DatabricksException |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 15576,
"end": 15861
} | class ____:
xlBox = 0 # from enum XlBarShape
xlConeToMax = 5 # from enum XlBarShape
xlConeToPoint = 4 # from enum XlBarShape
xlCylinder = 3 # from enum XlBarShape
xlPyramidToMax = 2 # from enum XlBarShape
xlPyramidToPoint = 1 # from enum XlBarShape
| BarShape |
python | ipython__ipython | tests/test_zzz_autoreload.py | {
"start": 5387,
"end": 22933
} | class ____(Fixture):
def test_reload_enums(self):
mod_name, mod_fn = self.new_module(
textwrap.dedent(
"""
from enum import Enum
class MyEnum(Enum):
A = 'A'
B = 'B'
"""
)
)
self.shell.magic_autoreload("2")
self.shell.magic_aimport(mod_name)
self.write_file(
mod_fn,
textwrap.dedent(
"""
from enum import Enum
class MyEnum(Enum):
A = 'A'
B = 'B'
C = 'C'
"""
),
)
with tt.AssertNotPrints(
("[autoreload of %s failed:" % mod_name), channel="stderr"
):
self.shell.run_code("pass") # trigger another reload
def test_reload_class_type(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
class Test():
def meth(self):
return "old"
"""
)
assert "test" not in self.shell.ns
assert "result" not in self.shell.ns
self.shell.run_code("from %s import Test" % mod_name)
self.shell.run_code("test = Test()")
self.write_file(
mod_fn,
"""
class Test():
def meth(self):
return "new"
""",
)
test_object = self.shell.ns["test"]
# important to trigger autoreload logic !
self.shell.run_code("pass")
test_class = pickle_get_current_class(test_object)
assert isinstance(test_object, test_class)
# extra check.
self.shell.run_code("import pickle")
self.shell.run_code("p = pickle.dumps(test)")
def test_reload_class_attributes(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
textwrap.dedent(
"""
class MyClass:
def __init__(self, a=10):
self.a = a
self.b = 22
# self.toto = 33
def square(self):
print('compute square')
return self.a*self.a
"""
)
)
self.shell.run_code("from %s import MyClass" % mod_name)
self.shell.run_code("first = MyClass(5)")
self.shell.run_code("first.square()")
with self.assertRaises(AttributeError):
self.shell.run_code("first.cube()")
with self.assertRaises(AttributeError):
self.shell.run_code("first.power(5)")
self.shell.run_code("first.b")
with self.assertRaises(AttributeError):
self.shell.run_code("first.toto")
# remove square, add power
self.write_file(
mod_fn,
textwrap.dedent(
"""
class MyClass:
def __init__(self, a=10):
self.a = a
self.b = 11
def power(self, p):
print('compute power '+str(p))
return self.a**p
"""
),
)
self.shell.run_code("second = MyClass(5)")
for object_name in {"first", "second"}:
self.shell.run_code(f"{object_name}.power(5)")
with self.assertRaises(AttributeError):
self.shell.run_code(f"{object_name}.cube()")
with self.assertRaises(AttributeError):
self.shell.run_code(f"{object_name}.square()")
self.shell.run_code(f"{object_name}.b")
self.shell.run_code(f"{object_name}.a")
with self.assertRaises(AttributeError):
self.shell.run_code(f"{object_name}.toto")
@skipif_not_numpy
def test_comparing_numpy_structures(self):
self.shell.magic_autoreload("2")
self.shell.run_code("1+1")
mod_name, mod_fn = self.new_module(
textwrap.dedent(
"""
import numpy as np
class MyClass:
a = (np.array((.1, .2)),
np.array((.2, .3)))
"""
)
)
self.shell.run_code("from %s import MyClass" % mod_name)
self.shell.run_code("first = MyClass()")
# change property `a`
self.write_file(
mod_fn,
textwrap.dedent(
"""
import numpy as np
class MyClass:
a = (np.array((.3, .4)),
np.array((.5, .6)))
"""
),
)
with tt.AssertNotPrints(
("[autoreload of %s failed:" % mod_name), channel="stderr"
):
self.shell.run_code("pass") # trigger another reload
def test_autoload_newly_added_objects(self):
# All of these fail with %autoreload 2
self.shell.magic_autoreload("3")
mod_code = """
def func1(): pass
"""
mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
self.shell.run_code(f"from {mod_name} import *")
self.shell.run_code("func1()")
with self.assertRaises(NameError):
self.shell.run_code("func2()")
with self.assertRaises(NameError):
self.shell.run_code("t = Test()")
with self.assertRaises(NameError):
self.shell.run_code("number")
# ----------- TEST NEW OBJ LOADED --------------------------
new_code = """
def func1(): pass
def func2(): pass
class Test: pass
number = 0
from enum import Enum
class TestEnum(Enum):
A = 'a'
"""
self.write_file(mod_fn, textwrap.dedent(new_code))
# test function now exists in shell's namespace namespace
self.shell.run_code("func2()")
# test function now exists in module's dict
self.shell.run_code(f"import sys; sys.modules['{mod_name}'].func2()")
# test class now exists
self.shell.run_code("t = Test()")
# test global built-in var now exists
self.shell.run_code("number")
# test the enumerations gets loaded successfully
self.shell.run_code("TestEnum.A")
# ----------- TEST NEW OBJ CAN BE CHANGED --------------------
new_code = """
def func1(): return 'changed'
def func2(): return 'changed'
class Test:
def new_func(self):
return 'changed'
number = 1
from enum import Enum
class TestEnum(Enum):
A = 'a'
B = 'added'
"""
self.write_file(mod_fn, textwrap.dedent(new_code))
self.shell.run_code("assert func1() == 'changed'")
self.shell.run_code("assert func2() == 'changed'")
self.shell.run_code("t = Test(); assert t.new_func() == 'changed'")
self.shell.run_code("assert number == 1")
if sys.version_info < (3, 12):
self.shell.run_code("assert TestEnum.B.value == 'added'")
# ----------- TEST IMPORT FROM MODULE --------------------------
new_mod_code = """
from enum import Enum
class Ext(Enum):
A = 'ext'
def ext_func():
return 'ext'
class ExtTest:
def meth(self):
return 'ext'
ext_int = 2
"""
new_mod_name, new_mod_fn = self.new_module(textwrap.dedent(new_mod_code))
current_mod_code = f"""
from {new_mod_name} import *
"""
self.write_file(mod_fn, textwrap.dedent(current_mod_code))
self.shell.run_code("assert Ext.A.value == 'ext'")
self.shell.run_code("assert ext_func() == 'ext'")
self.shell.run_code("t = ExtTest(); assert t.meth() == 'ext'")
self.shell.run_code("assert ext_int == 2")
def test_autoload3_import_Y_as_Z(self):
self.shell.magic_autoreload("3")
mod_code = """
def func1(): pass
n = 1
"""
mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
self.shell.run_code(f"from {mod_name} import n as foo")
self.shell.run_code("foo")
with self.assertRaises(NameError):
self.shell.run_code("func1()")
new_code = """
n = 100
def func2(): pass
def func1(): pass
m = 5
"""
self.write_file(mod_fn, textwrap.dedent(new_code))
self.shell.run_code("foo")
with self.assertRaises(NameError):
self.shell.run_code("n")
def test_autoload3_import_Y_as_Z_overloading(self):
self.shell.magic_autoreload("3")
mod_code = """
def func1(): pass
n = 1
"""
mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
self.shell.run_code(f"from {mod_name} import n as foo")
self.shell.run_code("foo")
with self.assertRaises(NameError):
self.shell.run_code("func1()")
new_code = """
n = 100
def func2(): pass
def func1(): pass
foo = 45
"""
self.write_file(mod_fn, textwrap.dedent(new_code))
self.shell.run_code(f"assert foo == 100")
self.shell.run_code(f"from {mod_name} import foo")
self.shell.run_code(f"assert foo == 45")
def test_autoload3_normalimport(self):
self.shell.magic_autoreload("3")
mod_code = """
def func1(): pass
n = 1
"""
mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
self.shell.run_code(f"import {mod_name}")
self.shell.run_code(f"{mod_name}.func1()")
self.shell.run_code(f"{mod_name}.n")
new_code = """
n = 100
def func2(): pass
def func1(): pass
m = 5
"""
self.write_file(mod_fn, textwrap.dedent(new_code))
self.shell.run_code(f"{mod_name}.func1()")
self.shell.run_code(f"{mod_name}.n")
self.shell.run_code(f"{mod_name}.func2()")
self.shell.run_code(f"{mod_name}.m")
self.shell.run_code(f"from {mod_name} import n")
self.shell.run_code(f"{mod_name}.func1()")
self.shell.run_code(f"n")
def test_autoload3_normalimport_2(self):
self.shell.magic_autoreload("3")
mod_code = """
def func1(): pass
n = 1
"""
mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
self.shell.run_code(f"from {mod_name} import n")
with self.assertRaises(NameError):
self.shell.run_code("func1()")
self.shell.run_code("n")
new_code = """
n = 100
def func2(): pass
def func1(): pass
m = 5
"""
self.shell.run_code(f"import {mod_name}")
self.write_file(mod_fn, textwrap.dedent(new_code))
self.shell.run_code(f"{mod_name}.func1()")
self.shell.run_code(f"{mod_name}.n")
self.shell.run_code(f"{mod_name}.func2()")
self.shell.run_code(f"{mod_name}.m")
self.shell.run_code(f"n")
def test_autoload_3_does_not_add_all(self):
# Tests that %autoreload 3 does not effectively run from X import *
self.shell.magic_autoreload("3")
mod_code = """
def func1(): pass
n = 1
"""
mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
self.shell.run_code(f"from {mod_name} import n")
self.shell.run_code("n")
with self.assertRaises(NameError):
self.shell.run_code("func()")
new_code = """
n = 1
def func2(): pass
def func1(): pass
m = 5
"""
self.write_file(mod_fn, textwrap.dedent(new_code))
self.shell.run_code("n")
with self.assertRaises(NameError):
self.shell.run_code("func1()")
with self.assertRaises(NameError):
self.shell.run_code("func2()")
with self.assertRaises(NameError):
self.shell.run_code("m")
self.shell.run_code(f"from {mod_name} import m")
self.shell.run_code("n")
self.shell.run_code("m")
with self.assertRaises(NameError):
self.shell.run_code("func1()")
with self.assertRaises(NameError):
self.shell.run_code("func2()")
self.shell.run_code(f"from {mod_name} import func1,func2")
self.shell.run_code("n")
self.shell.run_code("m")
self.shell.run_code("func1()")
self.shell.run_code("func2()")
def test_verbose_names(self):
# Asserts correspondense between original mode names and their verbose equivalents.
@dataclass
class AutoreloadSettings:
check_all: bool
enabled: bool
autoload_obj: bool
def gather_settings(mode):
self.shell.magic_autoreload(mode)
module_reloader = self.shell.auto_magics._reloader
return AutoreloadSettings(
module_reloader.check_all,
module_reloader.enabled,
module_reloader.autoload_obj,
)
assert gather_settings("0") == gather_settings("off")
assert gather_settings("0") == gather_settings("OFF") # Case insensitive
assert gather_settings("1") == gather_settings("explicit")
assert gather_settings("2") == gather_settings("all")
assert gather_settings("3") == gather_settings("complete")
# And an invalid mode name raises an exception.
with self.assertRaises(ValueError):
self.shell.magic_autoreload("4")
def test_aimport_parsing(self):
# Modules can be included or excluded all in one line.
module_reloader = self.shell.auto_magics._reloader
self.shell.magic_aimport("os") # import and mark `os` for auto-reload.
assert module_reloader.modules["os"] is True
assert "os" not in module_reloader.skip_modules.keys()
self.shell.magic_aimport("-math") # forbid autoreloading of `math`
assert module_reloader.skip_modules["math"] is True
assert "math" not in module_reloader.modules.keys()
self.shell.magic_aimport(
"-os, math"
) # Can do this all in one line; wasn't possible before.
assert module_reloader.modules["math"] is True
assert "math" not in module_reloader.skip_modules.keys()
assert module_reloader.skip_modules["os"] is True
assert "os" not in module_reloader.modules.keys()
def test_autoreload_output(self):
self.shell.magic_autoreload("complete")
mod_code = """
def func1(): pass
"""
mod_name, mod_fn = self.new_module(mod_code)
self.shell.run_code(f"import {mod_name}")
with tt.AssertPrints("", channel="stdout"): # no output; this is default
self.shell.run_code("pass")
self.shell.magic_autoreload("complete --print")
self.write_file(mod_fn, mod_code) # "modify" the module
with tt.AssertPrints(
f"Reloading '{mod_name}'.", channel="stdout"
): # see something printed out
self.shell.run_code("pass")
self.shell.magic_autoreload("complete -p")
self.write_file(mod_fn, mod_code) # "modify" the module
with tt.AssertPrints(
f"Reloading '{mod_name}'.", channel="stdout"
): # see something printed out
self.shell.run_code("pass")
self.shell.magic_autoreload("complete --print --log")
self.write_file(mod_fn, mod_code) # "modify" the module
with tt.AssertPrints(
f"Reloading '{mod_name}'.", channel="stdout"
): # see something printed out
self.shell.run_code("pass")
self.shell.magic_autoreload("complete --print --log")
self.write_file(mod_fn, mod_code) # "modify" the module
with self.assertLogs(logger="autoreload") as lo: # see something printed out
self.shell.run_code("pass")
assert lo.output == [f"INFO:autoreload:Reloading '{mod_name}'."]
self.shell.magic_autoreload("complete -l")
self.write_file(mod_fn, mod_code) # "modify" the module
with self.assertLogs(logger="autoreload") as lo: # see something printed out
self.shell.run_code("pass")
assert lo.output == [f"INFO:autoreload:Reloading '{mod_name}'."]
def _check_smoketest(self, use_aimport=True):
"""
Functional test for the automatic reloader using either
'%autoreload 1' or '%autoreload 2'
"""
mod_name, mod_fn = self.new_module(
"""
x = 9
z = 123 # this item will be deleted
def foo(y):
return y + 3
| TestAutoreload |
python | numba__numba | numba/tests/test_import.py | {
"start": 107,
"end": 4197
} | class ____(TestCase):
"""
Test behaviour of importing Numba.
"""
def test_laziness(self):
"""
Importing top-level numba features should not import too many modules.
"""
# A heuristic set of modules that shouldn't be imported immediately
banlist = ['cffi',
'distutils',
'numba.cuda',
'numba.cpython.mathimpl',
'numba.cpython.randomimpl',
'numba.tests',
'numba.core.typing.collections',
'numba.core.typing.listdecl',
'numba.core.typing.npdatetime',
]
# Sanity check the modules still exist...
for mod in banlist:
distutils_check = (mod != 'distutils' or
utils.PYVERSION < (3, 12))
if mod not in ('cffi',) and distutils_check:
__import__(mod)
code = """if 1:
from numba import jit, vectorize
from numba.core import types
import sys
print(list(sys.modules))
"""
out, _ = run_in_subprocess(code)
modlist = set(eval(out.strip()))
unexpected = set(banlist) & set(modlist)
self.assertFalse(unexpected, "some modules unexpectedly imported")
def test_no_impl_import(self):
"""
Tests that importing jit does not trigger import of modules containing
lowering implementations that would likely install things in the
builtins registry and have side effects impacting other targets
"""
# None of these modules should be imported through the process of
# doing 'import numba' or 'from numba import njit'
banlist = ['numba.cpython.slicing',
'numba.cpython.tupleobj',
'numba.cpython.enumimpl',
'numba.cpython.hashing',
'numba.cpython.heapq',
'numba.cpython.iterators',
'numba.cpython.numbers',
'numba.cpython.rangeobj',
'numba.cpython.cmathimpl',
'numba.cpython.mathimpl',
'numba.cpython.printimpl',
'numba.cpython.randomimpl',
'numba.core.optional',
'numba.misc.gdb_hook',
'numba.misc.literal',
'numba.misc.cffiimpl',
'numba.np.linalg',
'numba.np.polynomial',
'numba.np.arraymath',
'numba.np.npdatetime',
'numba.np.npyimpl',
'numba.typed.typeddict',
'numba.typed.typedlist',
'numba.experimental.jitclass.base',]
code1 = """if 1:
import sys
import numba
print(list(sys.modules))
"""
code2 = """if 1:
import sys
from numba import njit
@njit
def foo():
pass
print(list(sys.modules))
"""
for code in (code1, code2):
out, _ = run_in_subprocess(code)
modlist = set(eval(out.strip()))
unexpected = set(banlist) & set(modlist)
self.assertFalse(unexpected, "some modules unexpectedly imported")
def test_no_accidental_warnings(self):
# checks that importing Numba isn't accidentally triggering warnings due
# to e.g. deprecated use of import locations from Python's stdlib
code = "import numba"
# See: https://github.com/numba/numba/issues/6831
# bug in setuptools/packaging causing a deprecation warning
flags = ["-Werror", "-Wignore::DeprecationWarning:packaging.version:"]
run_in_subprocess(code, flags)
def test_import_star(self):
# checks that "from numba import *" works.
code = "from numba import *"
run_in_subprocess(code)
if __name__ == '__main__':
unittest.main()
| TestNumbaImport |
python | joblib__joblib | joblib/_parallel_backends.py | {
"start": 18068,
"end": 19655
} | class ____(PoolManagerMixin, ParallelBackendBase):
"""A ParallelBackend which will use a thread pool to execute batches in.
This is a low-overhead backend but it suffers from the Python Global
Interpreter Lock if the called function relies a lot on Python objects.
Mostly useful when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped in a "with
nogil" block or an expensive call to a library such as NumPy).
The actual thread pool is lazily initialized: the actual thread pool
construction is delayed to the first call to apply_async.
ThreadingBackend is used as the default backend for nested calls.
"""
supports_retrieve_callback = True
uses_threads = True
supports_sharedmem = True
def configure(self, n_jobs=1, parallel=None, **backend_kwargs):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
# Avoid unnecessary overhead and use sequential backend instead.
raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))
self.parallel = parallel
self._n_jobs = n_jobs
return n_jobs
def _get_pool(self):
"""Lazily initialize the thread pool
The actual pool of worker threads is only initialized at the first
call to apply_async.
"""
if self._pool is None:
self._pool = ThreadPool(self._n_jobs)
return self._pool
| ThreadingBackend |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 51032,
"end": 52391
} | class ____:
# `out_avals` is the `Array` global avals when using pjit. It is the
# local one when using `pmap`.
__slots__ = ("handlers", "out_shardings", "out_avals")
def __init__(self, handlers, out_shardings, out_avals):
self.handlers = handlers
self.out_shardings = out_shardings
self.out_avals = out_avals
def __call__(self, out_bufs):
return [h(bufs) for h, bufs in safe_zip(self.handlers, out_bufs)]
def local_avals_to_results_handler(
unmapped_local_out_avals: Sequence[ShapedArray],
local_shardings: Sequence[JSharding]) -> ResultsHandler:
out_indices = [tuple(s.devices_indices_map(aval.shape).values())
for s, aval in safe_zip(local_shardings, unmapped_local_out_avals)]
handlers = [
local_aval_to_result_handler(aval, s, idcs)
for aval, s, idcs in safe_zip(unmapped_local_out_avals, local_shardings, out_indices)
]
return ResultsHandler(handlers, local_shardings, unmapped_local_out_avals)
def global_avals_to_results_handler(
global_out_avals: Sequence[ShapedArray],
shardings: Sequence[JSharding],
committed: bool) -> ResultsHandler:
handlers = [
global_aval_to_result_handler(global_aval, s, committed)
for global_aval, s in safe_zip(global_out_avals, shardings)
]
return ResultsHandler(handlers, shardings, global_out_avals)
| ResultsHandler |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 27049,
"end": 27836
} | class ____(object):
def __init__(self,ibuf):
assert isinstance(ibuf,InputBuffer)
self.input = ibuf
self.column = 1
self.line = 1
self.tokenStartColumn = 1
self.tokenStartLine = 1
self.guessing = 0
self.filename = None
def reset(self):
self.column = 1
self.line = 1
self.tokenStartColumn = 1
self.tokenStartLine = 1
self.guessing = 0
self.filename = None
self.input.reset()
def LA(self,k):
return self.input.LA(k)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStream ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| LexerSharedInputState |
python | pytorch__pytorch | torch/_inductor/fuzzer.py | {
"start": 1878,
"end": 2324
} | class ____(CustomPartitionerFn):
"""
A Dummy partitioner function to be used by ConfigFuzzer
"""
def __call__(
self, gm: torch.fx.GraphModule, joint_inputs: Sequence[object], **kwargs: Any
) -> tuple[torch.fx.GraphModule, torch.fx.GraphModule]:
return min_cut_rematerialization_partition(gm, joint_inputs, **kwargs)
def uuid(self) -> Optional[Any]:
return None
T = TypeVar("T")
| DummyPartitionerFn |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 69064,
"end": 72702
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Gemma3nConfig, layer_idx: int):
super().__init__()
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
self.is_causal = not getattr(config, "use_bidirectional_attention", False)
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.attn_logit_softcapping = self.config.attn_logit_softcapping
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Gemma3nAttention |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/sort_by_all.py | {
"start": 122,
"end": 168
} | class ____:
pass
def foobar():
pass
| Quux |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/dialects/mysql/mysql_stuff.py | {
"start": 236,
"end": 509
} | class ____(Base):
__tablename__ = "test_table_json"
id = mapped_column(Integer, primary_key=True)
data: Mapped[str] = mapped_column()
insert(Test).on_duplicate_key_update(
{"id": 42, Test.data: 99}, [("foo", 44)], data=99, id="foo"
).inserted.foo.desc()
| Test |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/models/ci_report.py | {
"start": 1055,
"end": 1898
} | class ____(BaseModel):
file_path: str
connector_technical_name: Optional[str] = None
connector_version: Optional[str] = None
run_timestamp: Optional[str] = None
run_duration: Optional[float] = None
success: Optional[bool] = None
failed_steps: Optional[List[str]] = None
successful_steps: Optional[List[str]] = None
skipped_steps: Optional[List[str]] = None
gha_workflow_run_url: Optional[str] = None
pipeline_start_timestamp: Optional[int] = None
pipeline_end_timestamp: Optional[int] = None
pipeline_duration: Optional[int] = None
git_branch: Optional[str] = None
git_revision: Optional[str] = None
ci_context: Optional[str] = None
cdk_version: Optional[str] = None
html_report_url: Optional[str] = None
class Config:
extra = Extra.allow
| ConnectorPipelineReport |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/too_many_arguments.py | {
"start": 1260,
"end": 1416
} | class ____:
# `__new__` counts args like a classmethod
# even though it is an implicit staticmethod
def __new__(cls,a,b,c,d,e): # Ok
...
| Foo |
python | hynek__structlog | src/structlog/_output.py | {
"start": 2939,
"end": 3395
} | class ____:
r"""
Produce `PrintLogger`\ s.
To be used with `structlog.configure`\ 's ``logger_factory``.
Args:
file: File to print to. (default: `sys.stdout`)
Positional arguments are silently ignored.
.. versionadded:: 0.4.0
"""
def __init__(self, file: TextIO | None = None):
self._file = file
def __call__(self, *args: Any) -> PrintLogger:
return PrintLogger(self._file)
| PrintLoggerFactory |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/diag_test.py | {
"start": 434,
"end": 1253
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device)
if dim == 2
else torch.rand(M, device=device),
"diagonal": diagonal,
"out": out,
"out_tensor": torch.tensor(
(),
device=device,
),
}
self.set_module_name("diag")
def forward(self, input, diagonal: int, out: bool, out_tensor):
if out:
return torch.diag(input, diagonal=diagonal, out=out_tensor)
else:
return torch.diag(input, diagonal=diagonal)
op_bench.generate_pt_test(diag_configs_short, DiagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| DiagBenchmark |
python | facebook__pyre-check | stubs/integration_test/fixture_source/integration_test/constructor_tito.py | {
"start": 498,
"end": 890
} | class ____(ParentWithoutConstructor):
def __init__(self, arg):
super(ChildWithoutParentConstructor, self).__init__(arg)
def test1():
tainted = source()
child = ChildWithParentConstructor(tainted)
sink(child.arg) # Issue.
def test2():
tainted = source()
child = ChildWithoutParentConstructor(tainted)
sink(child.arg) # Issue.
| ChildWithoutParentConstructor |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_sparkline03.py | {
"start": 345,
"end": 5501
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = "Sheet1"
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row("A1", data)
worksheet.write_row("A2", data)
# Set up sparklines.
worksheet.add_sparkline("F1", {"range": "Sheet1!A1:E1"})
worksheet.add_sparkline("F2", {"range": "Sheet1!A2:E2"})
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
<row r="2" spans="1:5" x14ac:dyDescent="0.25">
<c r="A2">
<v>-2</v>
</c>
<c r="B2">
<v>2</v>
</c>
<c r="C2">
<v>3</v>
</c>
<c r="D2">
<v>-1</v>
</c>
<c r="E2">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A2:E2</xm:f>
<xm:sqref>F2</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
<x14:sparklineGroup displayEmptyCellsAs="gap">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | ray-project__ray | python/ray/_private/test_utils.py | {
"start": 50851,
"end": 53466
} | class ____(NodeKillerBase):
def __init__(self, *args, grace_period_s: int = 30, **kwargs):
super().__init__(*args, **kwargs)
self._grace_period_s = grace_period_s
self._kill_threads: Set[threading.Thread] = set()
def _kill_resource(self, node_id, node_to_kill_ip, _):
assert node_id not in self.killed
# Clean up any completed threads.
for thread in self._kill_threads.copy():
if not thread.is_alive():
thread.join()
self._kill_threads.remove(thread)
def _kill_node_with_grace_period(node_id, node_to_kill_ip):
self._drain_node(node_id)
time.sleep(self._grace_period_s)
# Anyscale extends the drain deadline if you shut down the instance
# directly. To work around this, we force-stop Ray on the node. Anyscale
# should then terminate it shortly after without updating the drain
# deadline.
_execute_command_on_node("ray stop --force", node_to_kill_ip)
logger.info(f"Starting killing thread {node_id=}, {node_to_kill_ip=}")
thread = threading.Thread(
target=_kill_node_with_grace_period,
args=(node_id, node_to_kill_ip),
daemon=True,
)
thread.start()
self._kill_threads.add(thread)
self.killed.add(node_id)
def _drain_node(self, node_id: str) -> None:
# We need to lazily import this object. Otherwise, Ray can't serialize the
# class.
from ray.core.generated import autoscaler_pb2
assert ray.NodeID.from_hex(node_id) != ray.NodeID.nil()
logging.info(f"Draining node {node_id=}")
address = services.canonicalize_bootstrap_address_or_die(addr="auto")
gcs_client = ray._raylet.GcsClient(address=address)
deadline_timestamp_ms = (time.time_ns() // 1e6) + (self._grace_period_s * 1e3)
try:
is_accepted, _ = gcs_client.drain_node(
node_id,
autoscaler_pb2.DrainNodeReason.Value("DRAIN_NODE_REASON_PREEMPTION"),
"",
deadline_timestamp_ms,
)
except ray.exceptions.RayError as e:
logger.error(f"Failed to drain node {node_id=}")
raise e
assert is_accepted, "Drain node request was rejected"
def _cleanup(self):
for thread in self._kill_threads.copy():
thread.join()
self._kill_threads.remove(thread)
assert not self._kill_threads
@ray.remote(num_cpus=0)
| EC2InstanceTerminatorWithGracePeriod |
python | ray-project__ray | rllib/connectors/agent/view_requirement.py | {
"start": 500,
"end": 5288
} | class ____(AgentConnector):
"""This connector does 2 things:
1. It filters data columns based on view_requirements for training and inference.
2. It buffers the right amount of history for computing the sample batch for
action computation.
The output of this connector is AgentConnectorsOut, which basically is
a tuple of 2 things:
{
"raw_dict": {"obs": ...}
"sample_batch": SampleBatch
}
raw_dict, which contains raw data for the latest time slice,
can be used to construct a complete episode by Sampler for training purpose.
The "for_action" SampleBatch can be used to directly call the policy.
"""
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
self._view_requirements = ctx.view_requirements
_enable_new_api_stack = False
# a dict of env_id to a dict of agent_id to a list of agent_collector objects
self.agent_collectors = defaultdict(
lambda: defaultdict(
lambda: AgentCollector(
self._view_requirements,
max_seq_len=ctx.config["model"]["max_seq_len"],
intial_states=ctx.initial_states,
disable_action_flattening=ctx.config.get(
"_disable_action_flattening", False
),
is_policy_recurrent=ctx.is_policy_recurrent,
# Note(jungong): We only leverage AgentCollector for building sample
# batches for computing actions.
# So regardless of whether this ViewRequirement connector is in
# training or inference mode, we should tell these AgentCollectors
# to behave in inference mode, so they don't accumulate episode data
# that is not useful for inference.
is_training=False,
_enable_new_api_stack=_enable_new_api_stack,
)
)
)
def reset(self, env_id: str):
if env_id in self.agent_collectors:
del self.agent_collectors[env_id]
def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
d = ac_data.data
assert (
type(d) is dict
), "Single agent data must be of type Dict[str, TensorStructType]"
env_id = ac_data.env_id
agent_id = ac_data.agent_id
# TODO: we don't keep episode_id around so use env_id as episode_id ?
episode_id = env_id if SampleBatch.EPS_ID not in d else d[SampleBatch.EPS_ID]
assert env_id is not None and agent_id is not None, (
f"ViewRequirementAgentConnector requires env_id({env_id}) "
"and agent_id({agent_id})"
)
assert (
self._view_requirements
), "ViewRequirements required by ViewRequirementAgentConnector"
# Note(jungong) : we need to keep the entire input dict here.
# A column may be used by postprocessing (GAE) even if its
# view_requirement.used_for_training is False.
training_dict = d
agent_collector = self.agent_collectors[env_id][agent_id]
if SampleBatch.NEXT_OBS not in d:
raise ValueError(f"connector data {d} should contain next_obs.")
# TODO(avnishn; kourosh) Unsure how agent_index is necessary downstream
# since there is no mapping from agent_index to agent_id that exists.
# need to remove this from the SampleBatch later.
# fall back to using dummy index if no index is available
if SampleBatch.AGENT_INDEX in d:
agent_index = d[SampleBatch.AGENT_INDEX]
else:
try:
agent_index = float(agent_id)
except ValueError:
agent_index = -1
if agent_collector.is_empty():
agent_collector.add_init_obs(
episode_id=episode_id,
agent_index=agent_index,
env_id=env_id,
init_obs=d[SampleBatch.NEXT_OBS],
init_infos=d.get(SampleBatch.INFOS),
)
else:
agent_collector.add_action_reward_next_obs(d)
sample_batch = agent_collector.build_for_inference()
return_data = AgentConnectorDataType(
env_id, agent_id, AgentConnectorsOutput(training_dict, sample_batch)
)
return return_data
def to_state(self):
return ViewRequirementAgentConnector.__name__, None
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return ViewRequirementAgentConnector(ctx)
register_connector(
ViewRequirementAgentConnector.__name__, ViewRequirementAgentConnector
)
| ViewRequirementAgentConnector |
python | celery__celery | t/unit/events/test_cursesmon.py | {
"start": 47,
"end": 122
} | class ____:
def getmaxyx(self):
return self.y, self.x
| MockWindow |
python | pydata__xarray | xarray/tests/test_parallelcompat.py | {
"start": 582,
"end": 1502
} | class ____(np.ndarray):
"""
Mock-up of a chunked array class.
Adds a (non-functional) .chunks attribute by following this example in the numpy docs
https://numpy.org/doc/stable/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray
"""
chunks: T_NormalizedChunks
def __new__(
cls,
shape,
dtype=float,
buffer=None,
offset=0,
strides=None,
order=None,
chunks=None,
):
obj = super().__new__(cls, shape, dtype, buffer, offset, strides, order)
obj.chunks = chunks
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.chunks = getattr(obj, "chunks", None) # type: ignore[assignment]
def rechunk(self, chunks, **kwargs):
copied = self.copy()
copied.chunks = chunks
return copied
| DummyChunkedArray |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-twilio/components.py | {
"start": 5922,
"end": 8131
} | class ____(StateMigration):
"""
Reshape Message Media state to include hierarchical parent slices back to the
Messages collection. Low-code derives `message_media` partitions from `messages`,
so the state must retain the media-level `subresource_uri` and also include
`parent_slice.subresource_uri` pointing to the Messages collection
(e.g., “…/Messages.json”). States missing `partition.subresource_uri` are skipped.
Initial:
{
"states": [
{
"partition": { "subresource_uri": "/2010-04-01/Accounts/AC123/Messages/SM123/Media.json" },
"cursor": { "date_created": "2022-11-01T00:00:00Z" }
}
]
}
Final:
{
"states": [
{
"partition": {
"subresource_uri": "/2010-04-01/Accounts/AC123/Messages/SM123/Media.json",
"parent_slice": {
"subresource_uri": "/2010-04-01/Accounts/AC123/Messages.json",
"parent_slice": {}
}
},
"cursor": { "date_created": "2022-11-01T00:00:00Z" }
}
]
}
"""
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
new_state = {"states": []}
for state in stream_state.get("states", []):
partition_state = {}
if not "partition" in state or "subresource_uri" not in state["partition"]:
continue
partition_state["partition"] = {
"subresource_uri": state["partition"]["subresource_uri"],
"parent_slice": {
"subresource_uri": state["partition"]["subresource_uri"].split("Messages")[0] + "Messages.json",
"parent_slice": {},
},
}
partition_state["cursor"] = state.get("cursor", {})
new_state["states"].append(partition_state)
return new_state
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
if stream_state and any("parent_slice" not in state["partition"] for state in stream_state.get("states", [])):
return True
return False
| TwilioMessageMediaStateMigration |
python | django-guardian__django-guardian | example_project/posts/models.py | {
"start": 63,
"end": 631
} | class ____(models.Model):
title = models.CharField("title", max_length=64)
slug = models.SlugField(max_length=64)
content = models.TextField("content")
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
default_permissions = ("add", "change", "delete")
permissions = (("view_post", "Can view post"),)
get_latest_by = "created_at"
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("posts_post_detail", args=(), kwargs={"slug": self.slug})
| Post |
python | PyCQA__bandit | tests/unit/core/test_blacklisting.py | {
"start": 158,
"end": 1316
} | class ____(testtools.TestCase):
def test_report_issue(self):
data = {"level": "HIGH", "message": "test {name}", "id": "B000"}
issue = blacklisting.report_issue(data, "name")
issue_dict = issue.as_dict(with_code=False)
self.assertIsInstance(issue_dict, dict)
self.assertEqual("B000", issue_dict["test_id"])
self.assertEqual("HIGH", issue_dict["issue_severity"])
self.assertEqual({}, issue_dict["issue_cwe"])
self.assertEqual("HIGH", issue_dict["issue_confidence"])
self.assertEqual("test name", issue_dict["issue_text"])
def test_report_issue_defaults(self):
data = {"message": "test {name}"}
issue = blacklisting.report_issue(data, "name")
issue_dict = issue.as_dict(with_code=False)
self.assertIsInstance(issue_dict, dict)
self.assertEqual("LEGACY", issue_dict["test_id"])
self.assertEqual("MEDIUM", issue_dict["issue_severity"])
self.assertEqual({}, issue_dict["issue_cwe"])
self.assertEqual("HIGH", issue_dict["issue_confidence"])
self.assertEqual("test name", issue_dict["issue_text"])
| BlacklistingTests |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable_py30.py | {
"start": 2172,
"end": 2500
} | class ____(metaclass=abc.Metaclass):
"""Metaclasses can come from imported modules."""
# The following used to raise used-before-assignment
# pylint: disable=missing-docstring, multiple-statements
def used_before_assignment(*, arg): return arg + 1
# Test for #4021
# https://github.com/pylint-dev/pylint/issues/4021
| FifthGood |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 61655,
"end": 62318
} | class ____(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt-full",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
while frame:
if frame.is_python_frame():
frame.print_summary()
frame = frame.older()
PyBacktraceFull()
| PyBacktraceFull |
python | pytorch__pytorch | torch/distributed/tensor/experimental/_context_parallel/_attention.py | {
"start": 7077,
"end": 7357
} | class ____(ABC):
@abstractmethod
def __init__(self, pg: dist.ProcessGroup, seq_dim: int) -> None: ...
@abstractmethod
def exchange_buffers(self, curr_buffer: torch.Tensor) -> None: ...
@abstractmethod
def next_buffer(self) -> torch.Tensor: ...
| _RingRotater |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_bool.py | {
"start": 568,
"end": 15812
} | class ____(__TestCase):
def test_subclass(self):
try:
with torch._dynamo.error_on_graph_break(False):
class C(bool):
pass
except TypeError:
pass
else:
self.fail("bool should not be subclassable")
self.assertRaises(TypeError, int.__new__, bool, 0)
def test_repr(self):
self.assertEqual(repr(False), 'False')
self.assertEqual(repr(True), 'True')
self.assertIs(eval(repr(False)), False)
self.assertIs(eval(repr(True)), True)
def test_str(self):
self.assertEqual(str(False), 'False')
self.assertEqual(str(True), 'True')
def test_int(self):
self.assertEqual(int(False), 0)
self.assertIsNot(int(False), False)
self.assertEqual(int(True), 1)
self.assertIsNot(int(True), True)
def test_float(self):
self.assertEqual(float(False), 0.0)
self.assertIsNot(float(False), False)
self.assertEqual(float(True), 1.0)
self.assertIsNot(float(True), True)
def test_complex(self):
self.assertEqual(complex(False), 0j)
self.assertEqual(complex(False), False)
self.assertEqual(complex(True), 1+0j)
self.assertEqual(complex(True), True)
def test_math(self):
self.assertEqual(+False, 0)
self.assertIsNot(+False, False)
self.assertEqual(-False, 0)
self.assertIsNot(-False, False)
self.assertEqual(abs(False), 0)
self.assertIsNot(abs(False), False)
self.assertEqual(+True, 1)
self.assertIsNot(+True, True)
self.assertEqual(-True, -1)
self.assertEqual(abs(True), 1)
self.assertIsNot(abs(True), True)
with self.assertWarns(DeprecationWarning):
# We need to put the bool in a variable, because the constant
# ~False is evaluated at compile time due to constant folding;
# consequently the DeprecationWarning would be issued during
# module loading and not during test execution.
false = False
self.assertEqual(~false, -1)
with self.assertWarns(DeprecationWarning):
# also check that the warning is issued in case of constant
# folding at compile time
self.assertEqual(eval("~False"), -1)
with self.assertWarns(DeprecationWarning):
true = True
self.assertEqual(~true, -2)
with self.assertWarns(DeprecationWarning):
self.assertEqual(eval("~True"), -2)
self.assertEqual(False+2, 2)
self.assertEqual(True+2, 3)
self.assertEqual(2+False, 2)
self.assertEqual(2+True, 3)
self.assertEqual(False+False, 0)
self.assertIsNot(False+False, False)
self.assertEqual(False+True, 1)
self.assertIsNot(False+True, True)
self.assertEqual(True+False, 1)
self.assertIsNot(True+False, True)
self.assertEqual(True+True, 2)
self.assertEqual(True-True, 0)
self.assertIsNot(True-True, False)
self.assertEqual(False-False, 0)
self.assertIsNot(False-False, False)
self.assertEqual(True-False, 1)
self.assertIsNot(True-False, True)
self.assertEqual(False-True, -1)
self.assertEqual(True*1, 1)
self.assertEqual(False*1, 0)
self.assertIsNot(False*1, False)
self.assertEqual(True/1, 1)
self.assertIsNot(True/1, True)
self.assertEqual(False/1, 0)
self.assertIsNot(False/1, False)
self.assertEqual(True%1, 0)
self.assertIsNot(True%1, False)
self.assertEqual(True%2, 1)
self.assertIsNot(True%2, True)
self.assertEqual(False%1, 0)
self.assertIsNot(False%1, False)
for b in False, True:
for i in 0, 1, 2:
self.assertEqual(b**i, int(b)**i)
self.assertIsNot(b**i, bool(int(b)**i))
for a in False, True:
for b in False, True:
self.assertIs(a&b, bool(int(a)&int(b)))
self.assertIs(a|b, bool(int(a)|int(b)))
self.assertIs(a^b, bool(int(a)^int(b)))
self.assertEqual(a&int(b), int(a)&int(b))
self.assertIsNot(a&int(b), bool(int(a)&int(b)))
self.assertEqual(a|int(b), int(a)|int(b))
self.assertIsNot(a|int(b), bool(int(a)|int(b)))
self.assertEqual(a^int(b), int(a)^int(b))
self.assertIsNot(a^int(b), bool(int(a)^int(b)))
self.assertEqual(int(a)&b, int(a)&int(b))
self.assertIsNot(int(a)&b, bool(int(a)&int(b)))
self.assertEqual(int(a)|b, int(a)|int(b))
self.assertIsNot(int(a)|b, bool(int(a)|int(b)))
self.assertEqual(int(a)^b, int(a)^int(b))
self.assertIsNot(int(a)^b, bool(int(a)^int(b)))
self.assertIs(1==1, True)
self.assertIs(1==0, False)
self.assertIs(0<1, True)
self.assertIs(1<0, False)
self.assertIs(0<=0, True)
self.assertIs(1<=0, False)
self.assertIs(1>0, True)
self.assertIs(1>1, False)
self.assertIs(1>=1, True)
self.assertIs(0>=1, False)
self.assertIs(0!=1, True)
self.assertIs(0!=0, False)
x = [1]
self.assertIs(x is x, True)
self.assertIs(x is not x, False)
self.assertIs(1 in x, True)
self.assertIs(0 in x, False)
self.assertIs(1 not in x, False)
self.assertIs(0 not in x, True)
x = {1: 2}
self.assertIs(x is x, True)
self.assertIs(x is not x, False)
self.assertIs(1 in x, True)
self.assertIs(0 in x, False)
self.assertIs(1 not in x, False)
self.assertIs(0 not in x, True)
self.assertIs(not True, False)
self.assertIs(not False, True)
def test_convert(self):
self.assertRaises(TypeError, bool, 42, 42)
self.assertIs(bool(10), True)
self.assertIs(bool(1), True)
self.assertIs(bool(-1), True)
self.assertIs(bool(0), False)
self.assertIs(bool("hello"), True)
self.assertIs(bool(""), False)
self.assertIs(bool(), False)
def test_keyword_args(self):
with self.assertRaisesRegex(TypeError, 'keyword argument'):
bool(x=10)
def test_format(self):
self.assertEqual("%d" % False, "0")
self.assertEqual("%d" % True, "1")
self.assertEqual("%x" % False, "0")
self.assertEqual("%x" % True, "1")
def test_hasattr(self):
self.assertIs(hasattr([], "append"), True)
self.assertIs(hasattr([], "wobble"), False)
def test_callable(self):
self.assertIs(callable(len), True)
self.assertIs(callable(1), False)
def test_isinstance(self):
self.assertIs(isinstance(True, bool), True)
self.assertIs(isinstance(False, bool), True)
self.assertIs(isinstance(True, int), True)
self.assertIs(isinstance(False, int), True)
self.assertIs(isinstance(1, bool), False)
self.assertIs(isinstance(0, bool), False)
def test_issubclass(self):
self.assertIs(issubclass(bool, int), True)
self.assertIs(issubclass(int, bool), False)
def test_contains(self):
self.assertIs(1 in {}, False)
self.assertIs(1 in {1:1}, True)
def test_string(self):
self.assertIs("xyz".endswith("z"), True)
self.assertIs("xyz".endswith("x"), False)
self.assertIs("xyz0123".isalnum(), True)
self.assertIs("@#$%".isalnum(), False)
self.assertIs("xyz".isalpha(), True)
self.assertIs("@#$%".isalpha(), False)
self.assertIs("0123".isdigit(), True)
self.assertIs("xyz".isdigit(), False)
self.assertIs("xyz".islower(), True)
self.assertIs("XYZ".islower(), False)
self.assertIs("0123".isdecimal(), True)
self.assertIs("xyz".isdecimal(), False)
self.assertIs("0123".isnumeric(), True)
self.assertIs("xyz".isnumeric(), False)
self.assertIs(" ".isspace(), True)
self.assertIs("\xa0".isspace(), True)
self.assertIs("\u3000".isspace(), True)
self.assertIs("XYZ".isspace(), False)
self.assertIs("X".istitle(), True)
self.assertIs("x".istitle(), False)
self.assertIs("XYZ".isupper(), True)
self.assertIs("xyz".isupper(), False)
self.assertIs("xyz".startswith("x"), True)
self.assertIs("xyz".startswith("z"), False)
def test_boolean(self):
self.assertEqual(True & 1, 1)
self.assertNotIsInstance(True & 1, bool)
self.assertIs(True & True, True)
self.assertEqual(True | 1, 1)
self.assertNotIsInstance(True | 1, bool)
self.assertIs(True | True, True)
self.assertEqual(True ^ 1, 0)
self.assertNotIsInstance(True ^ 1, bool)
self.assertIs(True ^ True, False)
def test_fileclosed(self):
try:
with open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertIs(f.closed, False)
self.assertIs(f.closed, True)
finally:
os.remove(os_helper.TESTFN)
def test_types(self):
# types are always true.
for t in [bool, complex, dict, float, int, list, object,
set, str, tuple, type]:
self.assertIs(bool(t), True)
def test_operator(self):
import operator
self.assertIs(operator.truth(0), False)
self.assertIs(operator.truth(1), True)
self.assertIs(operator.not_(1), False)
self.assertIs(operator.not_(0), True)
self.assertIs(operator.contains([], 1), False)
self.assertIs(operator.contains([1], 1), True)
self.assertIs(operator.lt(0, 0), False)
self.assertIs(operator.lt(0, 1), True)
self.assertIs(operator.is_(True, True), True)
self.assertIs(operator.is_(True, False), False)
self.assertIs(operator.is_not(True, True), False)
self.assertIs(operator.is_not(True, False), True)
def test_marshal(self):
import marshal
self.assertIs(marshal.loads(marshal.dumps(True)), True)
self.assertIs(marshal.loads(marshal.dumps(False)), False)
def test_pickle(self):
import pickle
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertIs(pickle.loads(pickle.dumps(True, proto)), True)
self.assertIs(pickle.loads(pickle.dumps(False, proto)), False)
def test_picklevalues(self):
# Test for specific backwards-compatible pickle values
import pickle
self.assertEqual(pickle.dumps(True, protocol=0), b"I01\n.")
self.assertEqual(pickle.dumps(False, protocol=0), b"I00\n.")
self.assertEqual(pickle.dumps(True, protocol=1), b"I01\n.")
self.assertEqual(pickle.dumps(False, protocol=1), b"I00\n.")
self.assertEqual(pickle.dumps(True, protocol=2), b'\x80\x02\x88.')
self.assertEqual(pickle.dumps(False, protocol=2), b'\x80\x02\x89.')
def test_convert_to_bool(self):
# Verify that TypeError occurs when bad things are returned
# from __bool__(). This isn't really a bool test, but
# it's related.
check = lambda o: self.assertRaises(TypeError, bool, o)
with torch._dynamo.error_on_graph_break(False):
class Foo(object):
def __bool__(self):
return self
check(Foo())
with torch._dynamo.error_on_graph_break(False):
class Bar(object):
def __bool__(self):
return "Yes"
check(Bar())
with torch._dynamo.error_on_graph_break(False):
class Baz(int):
def __bool__(self):
return self
check(Baz())
# __bool__() must return a bool not an int
with torch._dynamo.error_on_graph_break(False):
class Spam(int):
def __bool__(self):
return 1
check(Spam())
with torch._dynamo.error_on_graph_break(False):
class Eggs:
def __len__(self):
return -1
self.assertRaises(ValueError, bool, Eggs())
def test_interpreter_convert_to_bool_raises(self):
with torch._dynamo.error_on_graph_break(False):
class SymbolicBool:
def __bool__(self):
raise TypeError
class Symbol:
def __gt__(self, other):
return SymbolicBool()
x = Symbol()
with self.assertRaises(TypeError):
if x > 0:
msg = "x > 0 was true"
else:
msg = "x > 0 was false"
# This used to create negative refcounts, see gh-102250
del x
def test_from_bytes(self):
self.assertIs(bool.from_bytes(b'\x00'*8, 'big'), False)
self.assertIs(bool.from_bytes(b'abcd', 'little'), True)
def test_sane_len(self):
# this test just tests our assumptions about __len__
# this will start failing if __len__ changes assertions
for badval in ['illegal', -1, 1 << 32]:
with torch._dynamo.error_on_graph_break(False):
class A:
def __len__(self):
return badval
try:
bool(A())
except (Exception) as e_bool:
try:
len(A())
except (Exception) as e_len:
self.assertEqual(str(e_bool), str(e_len))
def test_blocked(self):
with torch._dynamo.error_on_graph_break(False):
class A:
__bool__ = None
self.assertRaises(TypeError, bool, A())
with torch._dynamo.error_on_graph_break(False):
class B:
def __len__(self):
return 10
__bool__ = None
self.assertRaises(TypeError, bool, B())
def test_real_and_imag(self):
self.assertEqual(True.real, 1)
self.assertEqual(True.imag, 0)
self.assertIs(type(True.real), int)
self.assertIs(type(True.imag), int)
self.assertEqual(False.real, 0)
self.assertEqual(False.imag, 0)
self.assertIs(type(False.real), int)
self.assertIs(type(False.imag), int)
def test_bool_called_at_least_once(self):
with torch._dynamo.error_on_graph_break(False):
class X:
def __init__(self):
self.count = 0
def __bool__(self):
self.count += 1
return True
def f(x):
if x or True:
pass
x = X()
f(x)
self.assertGreaterEqual(x.count, 1)
def test_bool_new(self):
self.assertIs(bool.__new__(bool), False)
self.assertIs(bool.__new__(bool, 1), True)
self.assertIs(bool.__new__(bool, 0), False)
self.assertIs(bool.__new__(bool, False), False)
self.assertIs(bool.__new__(bool, True), True)
if __name__ == "__main__":
run_tests()
| BoolTest |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_value_lengths_to_equal.py | {
"start": 2205,
"end": 13982
} | class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
ExpectColumnValueLengthsToEqual is a \
Column Map Expectation.
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
value (int): \
{VALUE_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
{MOSTLY_DESCRIPTION} \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1.
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValueLengthsToBeBetween](https://greatexpectations.io/expectations/expect_column_value_lengths_to_be_between)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 "12345" "A"
1 "abcde" "13579"
2 "1b3d5" "24680"
Code Examples:
Passing Case:
Input:
ExpectColumnValueLengthsToEqual(
column="test",
value=5
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValueLengthsToEqual(
column="test2",
value=5
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 1,
"unexpected_percent": 33.33333333333333,
"partial_unexpected_list": [
"A"
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 33.33333333333333,
"unexpected_percent_nonmissing": 33.33333333333333
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
value: Union[float, SuiteParameterDict] = pydantic.Field(description=VALUE_DESCRIPTION)
# This dictionary contains metadata for display in the public gallery
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.value_length.equals"
success_keys = ("value", "mostly")
args_keys = (
"column",
"value",
)
class Config:
title = "Expect column value lengths to equal"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnValueLengthsToEqual]
) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls, renderer_configuration: RendererConfiguration
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("value", RendererValueType.NUMBER),
("mostly", RendererValueType.NUMBER),
("strict_min", RendererValueType.BOOLEAN),
("strict_max", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.value:
template_str = "values may have any length."
else:
template_str = "values must be $value characters long"
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "value", "mostly", "row_condition", "condition_parser"],
)
if params.get("value") is None:
template_str = "values may have any length."
else:
template_str = "values must be $value characters long"
if params["mostly"] is not None:
if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| ExpectColumnValueLengthsToEqual |
python | numba__numba | numba/core/extending.py | {
"start": 17855,
"end": 18971
} | class ____(collections.namedtuple(
'_SentryLiteralArgs', ['literal_args'])):
"""
Parameters
----------
literal_args : Sequence[str]
A sequence of names for literal arguments
Examples
--------
The following line:
>>> SentryLiteralArgs(literal_args).for_pysig(pysig).bind(*args, **kwargs)
is equivalent to:
>>> sentry_literal_args(pysig, literal_args, args, kwargs)
"""
def for_function(self, func):
"""Bind the sentry to the signature of *func*.
Parameters
----------
func : Function
A python function.
Returns
-------
obj : BoundLiteralArgs
"""
return self.for_pysig(utils.pysignature(func))
def for_pysig(self, pysig):
"""Bind the sentry to the given signature *pysig*.
Parameters
----------
pysig : inspect.Signature
Returns
-------
obj : BoundLiteralArgs
"""
return BoundLiteralArgs(
pysig=pysig,
literal_args=self.literal_args,
)
| SentryLiteralArgs |
python | ray-project__ray | rllib/evaluation/tests/test_rollout_worker.py | {
"start": 1372,
"end": 2055
} | class ____(RandomPolicy):
@override(RandomPolicy)
def compute_actions(
self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs
):
return np.array([random.choice([0, 1])] * len(obs_batch)), [], {}
@override(Policy)
def postprocess_trajectory(self, batch, other_agent_batches=None, episode=None):
assert episode is not None
super().postprocess_trajectory(batch, other_agent_batches, episode)
return compute_advantages(batch, 100.0, 0.9, use_gae=False, use_critic=False)
| MockPolicy |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer.py | {
"start": 61262,
"end": 62956
} | class ____(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3):
"""
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
"""
super().__init__()
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
self.layers = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
activation = nn.ReLU() if i < num_layers - 1 else nn.Identity()
layer = PredictionBlock(in_dim, out_dim, activation=activation)
self.layers.append(layer)
# Provide backwards compatibility from when the class inherited from nn.Sequential
# In nn.Sequential subclasses, the name given to the layer is its index in the sequence.
# In nn.Module subclasses they derived from the instance attribute they are assigned to e.g.
# self.my_layer_name = Layer()
# We can't give instance attributes integer names i.e. self.0 is not permitted and so need to register
# explicitly
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
| MaskformerMLPPredictionHead |
python | PyCQA__pylint | tests/functional/m/missing/missing_docstring.py | {
"start": 258,
"end": 313
} | class ____:
"""It has a docstring."""
| ClassDocumented |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 105122,
"end": 105259
} | class ____(BaseModel, extra="forbid"):
shard_id: int = Field(..., description="")
peer_id: int = Field(..., description="")
| Replica |
python | tornadoweb__tornado | tornado/test/simple_httpclient_test.py | {
"start": 2423,
"end": 2571
} | class ____(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
| OptionsHandler |
python | django__django | tests/admin_inlines/models.py | {
"start": 7727,
"end": 7906
} | class ____(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey("self", models.SET_NULL, null=True, blank=True)
# Models for #19524
| BinaryTree |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_alerts.py | {
"start": 141,
"end": 4452
} | class ____(BaseTestCase):
panel_id = "AlertsPanel"
def test_alert_warning_display(self):
"""
Test that the panel (does not) display[s] an alert when there are
(no) problems.
"""
self.panel.record_stats({"alerts": []})
self.assertNotIn("alerts", self.panel.nav_subtitle)
self.panel.record_stats({"alerts": ["Alert 1", "Alert 2"]})
self.assertIn("2 alerts", self.panel.nav_subtitle)
def test_file_form_without_enctype_multipart_form_data(self):
"""
Test that the panel displays a form invalid message when there is
a file input but encoding not set to multipart/form-data.
"""
test_form = '<form id="test-form"><input type="file"></form>'
result = self.panel.check_invalid_file_form_configuration(test_form)
expected_error = (
'Form with id "test-form" contains file input, '
'but does not have the attribute enctype="multipart/form-data".'
)
self.assertEqual(result[0]["alert"], expected_error)
self.assertEqual(len(result), 1)
def test_file_form_no_id_without_enctype_multipart_form_data(self):
"""
Test that the panel displays a form invalid message when there is
a file input but encoding not set to multipart/form-data.
This should use the message when the form has no id.
"""
test_form = '<form><input type="file"></form>'
result = self.panel.check_invalid_file_form_configuration(test_form)
expected_error = (
"Form contains file input, but does not have "
'the attribute enctype="multipart/form-data".'
)
self.assertEqual(result[0]["alert"], expected_error)
self.assertEqual(len(result), 1)
def test_file_form_with_enctype_multipart_form_data(self):
test_form = """<form id="test-form" enctype="multipart/form-data">
<input type="file">
</form>"""
result = self.panel.check_invalid_file_form_configuration(test_form)
self.assertEqual(len(result), 0)
def test_file_form_with_enctype_multipart_form_data_in_button(self):
test_form = """<form id="test-form">
<input type="file">
<input type="submit" formenctype="multipart/form-data">
</form>"""
result = self.panel.check_invalid_file_form_configuration(test_form)
self.assertEqual(len(result), 0)
def test_referenced_file_input_without_enctype_multipart_form_data(self):
test_file_input = """<form id="test-form"></form>
<input type="file" form = "test-form">"""
result = self.panel.check_invalid_file_form_configuration(test_file_input)
expected_error = (
'Input element references form with id "test-form", '
'but the form does not have the attribute enctype="multipart/form-data".'
)
self.assertEqual(result[0]["alert"], expected_error)
self.assertEqual(len(result), 1)
def test_referenced_file_input_with_enctype_multipart_form_data(self):
test_file_input = """<form id="test-form" enctype="multipart/form-data">
</form>
<input type="file" form = "test-form">"""
result = self.panel.check_invalid_file_form_configuration(test_file_input)
self.assertEqual(len(result), 0)
def test_integration_file_form_without_enctype_multipart_form_data(self):
t = Template('<form id="test-form"><input type="file"></form>')
c = Context({})
rendered_template = t.render(c)
response = HttpResponse(content=rendered_template)
self.panel.generate_stats(self.request, response)
self.assertIn("1 alert", self.panel.nav_subtitle)
self.assertIn(
"Form with id "test-form" contains file input, "
"but does not have the attribute enctype="multipart/form-data".",
self.panel.content,
)
def test_streaming_response(self):
"""Test to check for a streaming response."""
def _render():
yield "ok"
response = StreamingHttpResponse(_render())
self.panel.generate_stats(self.request, response)
self.assertEqual(self.panel.get_stats(), {"alerts": []})
| AlertsPanelTestCase |
python | google__pytype | pytype/pytd/optimize.py | {
"start": 3071,
"end": 3811
} | class ____:
"""Mutable class for collecting return types and exceptions of functions.
The collecting is stable: Items are kept in the order in which they were
encountered.
Attributes:
return_types: Return types seen so far.
exceptions: Exceptions seen so far.
"""
def __init__(self):
self.return_types = []
self.exceptions = []
def Update(self, signature):
"""Add the return types / exceptions of a signature to this instance."""
if signature.return_type not in self.return_types:
self.return_types.append(signature.return_type)
self.exceptions.extend(
exception
for exception in signature.exceptions
if exception not in self.exceptions
)
| _ReturnsAndExceptions |
python | django__django | tests/staticfiles_tests/test_checks.py | {
"start": 464,
"end": 4725
} | class ____(CollectionTestCase):
run_collectstatic_in_setUp = False
def test_base_finder_check_not_implemented(self):
finder = BaseFinder()
msg = (
"subclasses may provide a check() method to verify the finder is "
"configured correctly."
)
with self.assertRaisesMessage(NotImplementedError, msg):
finder.check()
def test_check_finders(self):
"""check_finders() concatenates all errors."""
error1 = Error("1")
error2 = Error("2")
error3 = Error("3")
def get_finders():
class Finder1(BaseFinder):
def check(self, **kwargs):
return [error1]
class Finder2(BaseFinder):
def check(self, **kwargs):
return []
class Finder3(BaseFinder):
def check(self, **kwargs):
return [error2, error3]
class Finder4(BaseFinder):
pass
return [Finder1(), Finder2(), Finder3(), Finder4()]
with mock.patch("django.contrib.staticfiles.checks.get_finders", get_finders):
errors = check_finders(None)
self.assertEqual(errors, [error1, error2, error3])
def test_no_errors_with_test_settings(self):
self.assertEqual(check_finders(None), [])
@override_settings(STATICFILES_DIRS="a string")
def test_dirs_not_tuple_or_list(self):
self.assertEqual(
check_finders(None),
[
Error(
"The STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
id="staticfiles.E001",
)
],
)
def test_dirs_contains_static_root(self):
with self.settings(STATICFILES_DIRS=[settings.STATIC_ROOT]):
self.assertEqual(
check_finders(None),
[
Error(
"The STATICFILES_DIRS setting should not contain the "
"STATIC_ROOT setting.",
id="staticfiles.E002",
)
],
)
def test_dirs_contains_static_root_in_tuple(self):
with self.settings(STATICFILES_DIRS=[("prefix", settings.STATIC_ROOT)]):
self.assertEqual(
check_finders(None),
[
Error(
"The STATICFILES_DIRS setting should not contain the "
"STATIC_ROOT setting.",
id="staticfiles.E002",
)
],
)
def test_prefix_contains_trailing_slash(self):
static_dir = Path(TEST_ROOT) / "project" / "documents"
with self.settings(STATICFILES_DIRS=[("prefix/", static_dir)]):
self.assertEqual(
check_finders(None),
[
Error(
"The prefix 'prefix/' in the STATICFILES_DIRS setting must "
"not end with a slash.",
id="staticfiles.E003",
),
],
)
def test_nonexistent_directories(self):
with self.settings(
STATICFILES_DIRS=[
"/fake/path",
("prefix", "/fake/prefixed/path"),
]
):
self.assertEqual(
check_finders(None),
[
Warning(
"The directory '/fake/path' in the STATICFILES_DIRS "
"setting does not exist.",
id="staticfiles.W004",
),
Warning(
"The directory '/fake/prefixed/path' in the "
"STATICFILES_DIRS setting does not exist.",
id="staticfiles.W004",
),
],
)
# Nonexistent directories are skipped.
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
self.assertEqual(list(finder.list(None)), [])
| FindersCheckTests |
python | spack__spack | lib/spack/spack/test/oci/mock_registry.py | {
"start": 11805,
"end": 13044
} | class ____(InMemoryOCIRegistry):
"""This is another in-memory OCI registry requiring basic authentication."""
def __init__(
self, domain, username: str, password: str, realm: str, allow_single_post: bool = True
) -> None:
super().__init__(domain, allow_single_post)
self.username = username
self.password = password
self.realm = realm
self.router.add_middleware(self.authenticate)
def authenticate(self, req: Request):
# Any request needs an Authorization header
authorization = req.get_header("Authorization")
if authorization is None:
raise MiddlewareError(self.unauthorized())
# Ensure that the username and password are correct
assert authorization.startswith("Basic ")
auth = base64.b64decode(authorization[6:]).decode("utf-8")
username, password = auth.split(":", 1)
if username != self.username or password != self.password:
raise MiddlewareError(self.unauthorized())
return req
def unauthorized(self):
return MockHTTPResponse(
401, "Unauthorized", {"www-authenticate": f'Basic realm="{self.realm}"'}
)
| InMemoryOCIRegistryWithBasicAuth |
python | plotly__plotly.py | plotly/graph_objs/volume/_surface.py | {
"start": 233,
"end": 6688
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume"
_path_str = "volume.surface"
_valid_props = {"count", "fill", "pattern", "show"}
@property
def count(self):
"""
Sets the number of iso-surfaces between minimum and maximum
iso-values. By default this value is 2 meaning that only
minimum and maximum surfaces would be drawn.
The 'count' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["count"]
@count.setter
def count(self, val):
self["count"] = val
@property
def fill(self):
"""
Sets the fill ratio of the iso-surface. The default fill value
of the surface is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def pattern(self):
"""
Sets the surface pattern of the iso-surface 3-D sections. The
default pattern of the surface is `all` meaning that the rest
of surface elements would be shaded. The check options (either
1 or 2) could be used to draw half of the squares on the
surface. Using various combinations of capital `A`, `B`, `C`,
`D` and `E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of interest.
The 'pattern' property is a flaglist and may be specified
as a string containing:
- Any combination of ['A', 'B', 'C', 'D', 'E'] joined with '+' characters
(e.g. 'A+B')
OR exactly one of ['all', 'odd', 'even'] (e.g. 'even')
Returns
-------
Any
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def show(self):
"""
Hides/displays surfaces between minimum and maximum iso-values.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
count
Sets the number of iso-surfaces between minimum and
maximum iso-values. By default this value is 2 meaning
that only minimum and maximum surfaces would be drawn.
fill
Sets the fill ratio of the iso-surface. The default
fill value of the surface is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
pattern
Sets the surface pattern of the iso-surface 3-D
sections. The default pattern of the surface is `all`
meaning that the rest of surface elements would be
shaded. The check options (either 1 or 2) could be used
to draw half of the squares on the surface. Using
various combinations of capital `A`, `B`, `C`, `D` and
`E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of
interest.
show
Hides/displays surfaces between minimum and maximum
iso-values.
"""
def __init__(
self, arg=None, count=None, fill=None, pattern=None, show=None, **kwargs
):
"""
Construct a new Surface object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.Surface`
count
Sets the number of iso-surfaces between minimum and
maximum iso-values. By default this value is 2 meaning
that only minimum and maximum surfaces would be drawn.
fill
Sets the fill ratio of the iso-surface. The default
fill value of the surface is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
pattern
Sets the surface pattern of the iso-surface 3-D
sections. The default pattern of the surface is `all`
meaning that the rest of surface elements would be
shaded. The check options (either 1 or 2) could be used
to draw half of the squares on the surface. Using
various combinations of capital `A`, `B`, `C`, `D` and
`E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of
interest.
show
Hides/displays surfaces between minimum and maximum
iso-values.
Returns
-------
Surface
"""
super().__init__("surface")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.Surface
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Surface`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("count", arg, count)
self._set_property("fill", arg, fill)
self._set_property("pattern", arg, pattern)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Surface |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 148908,
"end": 150090
} | class ____(TestCase):
def test_no_exceptions_pass(self):
iterable = '0123'
actual = list(mi.filter_except(int, iterable))
expected = ['0', '1', '2', '3']
self.assertEqual(actual, expected)
def test_no_exceptions_raise(self):
iterable = ['0', '1', 'two', '3']
with self.assertRaises(ValueError):
list(mi.filter_except(int, iterable))
def test_raise(self):
iterable = ['0', '12', 'three', None]
with self.assertRaises(TypeError):
list(mi.filter_except(int, iterable, ValueError))
def test_false(self):
# Even if the validator returns false, we pass through
validator = lambda x: False
iterable = ['0', '1', '2', 'three', None]
actual = list(mi.filter_except(validator, iterable, Exception))
expected = ['0', '1', '2', 'three', None]
self.assertEqual(actual, expected)
def test_multiple(self):
iterable = ['0', '1', '2', 'three', None, '4']
actual = list(mi.filter_except(int, iterable, ValueError, TypeError))
expected = ['0', '1', '2', '4']
self.assertEqual(actual, expected)
| FilterExceptTests |
python | pypa__pipenv | pipenv/vendor/pipdeptree/_models/dag.py | {
"start": 820,
"end": 10056
} | class ____(Mapping[DistPackage, List[ReqPackage]]):
"""
Representation of Package dependencies as directed acyclic graph using a dict as the underlying datastructure.
The nodes and their relationships (edges) are internally stored using a map as follows,
{a: [b, c],
b: [d],
c: [d, e],
d: [e],
e: [],
f: [b],
g: [e, f]}
Here, node `a` has 2 children nodes `b` and `c`. Consider edge direction from `a` -> `b` and `a` -> `c`
respectively.
A node is expected to be an instance of a subclass of `Package`. The keys are must be of class `DistPackage` and
each item in values must be of class `ReqPackage`. (See also ReversedPackageDAG where the key and value types are
interchanged).
"""
@classmethod
def from_pkgs(cls, pkgs: list[Distribution]) -> PackageDAG:
warning_printer = get_warning_printer()
dist_pkgs = [DistPackage(p) for p in pkgs]
idx = {p.key: p for p in dist_pkgs}
m: dict[DistPackage, list[ReqPackage]] = {}
dist_name_to_invalid_reqs_dict: dict[str, list[str]] = {}
for p in dist_pkgs:
reqs = []
requires_iterator = p.requires()
while True:
try:
req = next(requires_iterator)
except InvalidRequirementError as err:
# We can't work with invalid requirement strings. Let's warn the user about them.
if warning_printer.should_warn():
dist_name_to_invalid_reqs_dict.setdefault(p.project_name, []).append(str(err))
continue
except StopIteration:
break
d = idx.get(canonicalize_name(req.name))
# Distribution.requires only returns the name of requirements in the metadata file, which may not be the
# same as the name in PyPI. We should try to retain the original package names for requirements.
# See https://github.com/tox-dev/pipdeptree/issues/242
req.name = d.project_name if d is not None else req.name
pkg = ReqPackage(req, d)
reqs.append(pkg)
m[p] = reqs
should_print_warning = warning_printer.should_warn() and dist_name_to_invalid_reqs_dict
if should_print_warning:
warning_printer.print_multi_line(
"Invalid requirement strings found for the following distributions",
lambda: render_invalid_reqs_text(dist_name_to_invalid_reqs_dict),
)
return cls(m)
def __init__(self, m: dict[DistPackage, list[ReqPackage]]) -> None:
"""
Initialize the PackageDAG object.
:param dict m: dict of node objects (refer class docstring)
:returns: None
:rtype: NoneType
"""
self._obj: dict[DistPackage, list[ReqPackage]] = m
self._index: dict[str, DistPackage] = {p.key: p for p in list(self._obj)}
def get_node_as_parent(self, node_key: str) -> DistPackage | None:
"""
Get the node from the keys of the dict representing the DAG.
This method is useful if the dict representing the DAG contains different kind of objects in keys and values.
Use this method to look up a node obj as a parent (from the keys of the dict) given a node key.
:param node_key: identifier corresponding to key attr of node obj
:returns: node obj (as present in the keys of the dict)
"""
try:
return self._index[node_key]
except KeyError:
return None
def get_children(self, node_key: str) -> list[ReqPackage]:
"""
Get child nodes for a node by its key.
:param node_key: key of the node to get children of
:returns: child nodes
"""
node = self.get_node_as_parent(node_key)
return self._obj[node] if node else []
def filter_nodes(self, include: list[str] | None, exclude: set[str] | None) -> PackageDAG: # noqa: C901, PLR0912
"""
Filter nodes in a graph by given parameters.
If a node is included, then all it's children are also included.
:param include: list of node keys to include (or None)
:param exclude: set of node keys to exclude (or None)
:raises ValueError: If include has node keys that do not exist in the graph
:returns: filtered version of the graph
"""
# If neither of the filters are specified, short circuit
if include is None and exclude is None:
return self
include_with_casing_preserved: list[str] = []
if include:
include_with_casing_preserved = include
include = [canonicalize_name(i) for i in include]
exclude = {canonicalize_name(s) for s in exclude} if exclude else set()
# Check for mutual exclusion of show_only and exclude sets
# after normalizing the values to lowercase
if include and exclude:
assert not (set(include) & exclude)
# Traverse the graph in a depth first manner and filter the
# nodes according to `show_only` and `exclude` sets
stack: deque[DistPackage] = deque()
m: dict[DistPackage, list[ReqPackage]] = {}
seen = set()
matched_includes: set[str] = set()
for node in self._obj:
if any(fnmatch(node.key, e) for e in exclude):
continue
if include is None:
stack.append(node)
else:
should_append = False
for i in include:
if fnmatch(node.key, i):
# Add all patterns that match with the node key. Otherwise if we break, patterns like py* or
# pytest* (which both should match "pytest") may cause one pattern to be missed and will
# raise an error
matched_includes.add(i)
should_append = True
if should_append:
stack.append(node)
while stack:
n = stack.pop()
cldn = [c for c in self._obj[n] if not any(fnmatch(c.key, e) for e in exclude)]
m[n] = cldn
seen.add(n.key)
for c in cldn:
if c.key not in seen:
cld_node = self.get_node_as_parent(c.key)
if cld_node:
stack.append(cld_node)
else:
# It means there's no root node corresponding to the child node i.e.
# a dependency is missing
continue
non_existent_includes = [
i for i in include_with_casing_preserved if canonicalize_name(i) not in matched_includes
]
if non_existent_includes:
raise ValueError("No packages matched using the following patterns: " + ", ".join(non_existent_includes))
return self.__class__(m)
def reverse(self) -> ReversedPackageDAG:
"""
Reverse the DAG, or turn it upside-down.
In other words, the directions of edges of the nodes in the DAG will be reversed.
Note that this function purely works on the nodes in the graph. This implies that to perform a combination of
filtering and reversing, the order in which `filter` and `reverse` methods should be applied is important. For
e.g., if reverse is called on a filtered graph, then only the filtered nodes and it's children will be
considered when reversing. On the other hand, if filter is called on reversed DAG, then the definition of
"child" nodes is as per the reversed DAG.
:returns: DAG in the reversed form
"""
m: defaultdict[ReqPackage, list[DistPackage]] = defaultdict(list)
child_keys = {r.key for r in chain.from_iterable(self._obj.values())}
for k, vs in self._obj.items():
for v in vs:
# if v is already added to the dict, then ensure that
# we are using the same object. This check is required
# as we're using array mutation
node: ReqPackage = next((p for p in m if p.key == v.key), v)
m[node].append(k.as_parent_of(v))
if k.key not in child_keys:
m[k.as_requirement()] = []
return ReversedPackageDAG(dict(m)) # type: ignore[arg-type]
def sort(self) -> PackageDAG:
"""
Return sorted tree in which the underlying _obj dict is an dict, sorted alphabetically by the keys.
:returns: Instance of same class with dict
"""
return self.__class__({k: sorted(v) for k, v in sorted(self._obj.items())})
# Methods required by the abstract base class Mapping
def __getitem__(self, arg: DistPackage) -> list[ReqPackage] | None: # type: ignore[override]
return self._obj.get(arg)
def __iter__(self) -> Iterator[DistPackage]:
return self._obj.__iter__()
def __len__(self) -> int:
return len(self._obj)
| PackageDAG |
python | tiangolo__fastapi | docs_src/app_testing/app_b_an/main.py | {
"start": 373,
"end": 1205
} | class ____(BaseModel):
id: str
title: str
description: Union[str, None] = None
@app.get("/items/{item_id}", response_model=Item)
async def read_main(item_id: str, x_token: Annotated[str, Header()]):
if x_token != fake_secret_token:
raise HTTPException(status_code=400, detail="Invalid X-Token header")
if item_id not in fake_db:
raise HTTPException(status_code=404, detail="Item not found")
return fake_db[item_id]
@app.post("/items/", response_model=Item)
async def create_item(item: Item, x_token: Annotated[str, Header()]):
if x_token != fake_secret_token:
raise HTTPException(status_code=400, detail="Invalid X-Token header")
if item.id in fake_db:
raise HTTPException(status_code=409, detail="Item already exists")
fake_db[item.id] = item
return item
| Item |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 273300,
"end": 274569
} | class ____(ConditionalValueDefstringExprRef):
"""
ConditionalPredicateValueDefstringExprRef schema wrapper.
Parameters
----------
test : str, dict, :class:`Predicate`, :class:`FieldGTPredicate`, :class:`FieldLTPredicate`, :class:`FieldGTEPredicate`, :class:`FieldLTEPredicate`, :class:`LogicalOrPredicate`, :class:`ParameterPredicate`, :class:`FieldEqualPredicate`, :class:`FieldOneOfPredicate`, :class:`FieldRangePredicate`, :class:`FieldValidPredicate`, :class:`LogicalAndPredicate`, :class:`LogicalNotPredicate`, :class:`PredicateComposition`
Predicate for triggering the condition
value : str, dict, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {"$ref": "#/definitions/ConditionalPredicate<ValueDef<(string|ExprRef)>>"}
def __init__(
self,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(test=test, value=value, **kwds)
| ConditionalPredicateValueDefstringExprRef |
python | cython__cython | Cython/Plex/Errors.py | {
"start": 566,
"end": 976
} | class ____(PlexError):
scanner = None
position = None
state_name = None
def __init__(self, scanner, state_name):
self.scanner = scanner
self.position = scanner.get_position()
self.state_name = state_name
def __str__(self):
return ("'%s', line %d, char %d: Token not recognised in state %r" % (
self.position + (self.state_name,)))
| UnrecognizedInput |
python | apache__airflow | providers/fab/tests/unit/fab/www/views/test_views_custom_user_views.py | {
"start": 2969,
"end": 6454
} | class ____:
@pytest.fixture(autouse=True)
def app_context(self, app):
with app.app_context():
delete_roles(app)
yield
delete_user(app, "no_access")
delete_user(app, "has_access")
@pytest.mark.parametrize(("url", "_", "expected_text"), PERMISSIONS_TESTS_PARAMS)
def test_user_model_view_without_access(self, url, expected_text, _, app, client):
user_without_access = create_user(
app,
username="no_access",
role_name="role_no_access",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
client = client_with_login(
app,
username="no_access",
password="no_access",
)
response = client.get(url.replace("{user.id}", str(user_without_access.id)), follow_redirects=False)
assert response.status_code == 302
assert response.location.startswith("/login/")
@pytest.mark.parametrize(("url", "permission", "expected_text"), PERMISSIONS_TESTS_PARAMS)
def test_user_model_view_with_access(self, url, permission, expected_text, app, client):
user_with_access = create_user(
app,
username="has_access",
role_name="role_has_access",
permissions=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE), permission],
)
client = client_with_login(
app,
username="has_access",
password="has_access",
)
response = client.get(url.replace("{user.id}", str(user_with_access.id)), follow_redirects=True)
check_content_in_response(expected_text, response)
def test_user_model_view_without_delete_access(self, app, client):
user_to_delete = create_user(
app,
username="user_to_delete",
role_name="user_to_delete",
)
create_user(
app,
username="no_access",
role_name="role_no_access",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
client = client_with_login(
app,
username="no_access",
password="no_access",
)
response = client.post(f"/users/delete/{user_to_delete.id}", follow_redirects=False)
assert response.status_code == 302
assert response.location.startswith("/login/")
assert bool(get_auth_manager().security_manager.get_user_by_id(user_to_delete.id)) is True
def test_user_model_view_with_delete_access(self, app, client):
user_to_delete = create_user(
app,
username="user_to_delete",
role_name="user_to_delete",
)
create_user(
app,
username="has_access",
role_name="role_has_access",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER),
],
)
client = client_with_login(
app,
username="has_access",
password="has_access",
)
client.post(f"/users/delete/{user_to_delete.id}", follow_redirects=False)
assert bool(get_auth_manager().security_manager.get_user_by_id(user_to_delete.id)) is False
| TestSecurity |
python | doocs__leetcode | lcof2/剑指 Offer II 095. 最长公共子序列/Solution.py | {
"start": 0,
"end": 453
} | class ____:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m, n = len(text1), len(text2)
f = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if text1[i - 1] == text2[j - 1]:
f[i][j] = f[i - 1][j - 1] + 1
else:
f[i][j] = max(f[i - 1][j], f[i][j - 1])
return f[-1][-1]
| Solution |
python | pypa__pip | src/pip/_vendor/rich/progress.py | {
"start": 32381,
"end": 36471
} | class ____:
"""Information regarding a progress task.
This object should be considered read-only outside of the :class:`~Progress` class.
"""
id: TaskID
"""Task ID associated with this task (used in Progress methods)."""
description: str
"""str: Description of the task."""
total: Optional[float]
"""Optional[float]: Total number of steps in this task."""
completed: float
"""float: Number of steps completed"""
_get_time: GetTimeCallable
"""Callable to get the current time."""
finished_time: Optional[float] = None
"""float: Time task was finished."""
visible: bool = True
"""bool: Indicates if this task is visible in the progress display."""
fields: Dict[str, Any] = field(default_factory=dict)
"""dict: Arbitrary fields passed in via Progress.update."""
start_time: Optional[float] = field(default=None, init=False, repr=False)
"""Optional[float]: Time this task was started, or None if not started."""
stop_time: Optional[float] = field(default=None, init=False, repr=False)
"""Optional[float]: Time this task was stopped, or None if not stopped."""
finished_speed: Optional[float] = None
"""Optional[float]: The last speed for a finished task."""
_progress: Deque[ProgressSample] = field(
default_factory=lambda: deque(maxlen=1000), init=False, repr=False
)
_lock: RLock = field(repr=False, default_factory=RLock)
"""Thread lock."""
def get_time(self) -> float:
"""float: Get the current time, in seconds."""
return self._get_time()
@property
def started(self) -> bool:
"""bool: Check if the task as started."""
return self.start_time is not None
@property
def remaining(self) -> Optional[float]:
"""Optional[float]: Get the number of steps remaining, if a non-None total was set."""
if self.total is None:
return None
return self.total - self.completed
@property
def elapsed(self) -> Optional[float]:
"""Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
if self.start_time is None:
return None
if self.stop_time is not None:
return self.stop_time - self.start_time
return self.get_time() - self.start_time
@property
def finished(self) -> bool:
"""Check if the task has finished."""
return self.finished_time is not None
@property
def percentage(self) -> float:
"""float: Get progress of task as a percentage. If a None total was set, returns 0"""
if not self.total:
return 0.0
completed = (self.completed / self.total) * 100.0
completed = min(100.0, max(0.0, completed))
return completed
@property
def speed(self) -> Optional[float]:
"""Optional[float]: Get the estimated speed in steps per second."""
if self.start_time is None:
return None
with self._lock:
progress = self._progress
if not progress:
return None
total_time = progress[-1].timestamp - progress[0].timestamp
if total_time == 0:
return None
iter_progress = iter(progress)
next(iter_progress)
total_completed = sum(sample.completed for sample in iter_progress)
speed = total_completed / total_time
return speed
@property
def time_remaining(self) -> Optional[float]:
"""Optional[float]: Get estimated time to completion, or ``None`` if no data."""
if self.finished:
return 0.0
speed = self.speed
if not speed:
return None
remaining = self.remaining
if remaining is None:
return None
estimate = ceil(remaining / speed)
return estimate
def _reset(self) -> None:
"""Reset progress."""
self._progress.clear()
self.finished_time = None
self.finished_speed = None
| Task |
python | jazzband__django-simple-history | simple_history/tests/tests/test_utils.py | {
"start": 23363,
"end": 23823
} | class ____(TestCase):
def test_update_change_reason_with_excluded_fields(self):
poll = PollWithExcludeFields(
question="what's up?", pub_date=timezone.now(), place="The Pub"
)
poll.save()
update_change_reason(poll, "Test change reason.")
most_recent = poll.history.order_by("-history_date").first()
self.assertEqual(most_recent.history_change_reason, "Test change reason.")
| UpdateChangeReasonTestCase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 12416,
"end": 14288
} | class ____:
"""base for classes that have a "traverse internals" element,
which defines all kinds of ways of traversing the elements of an object.
Compared to :class:`.Visitable`, which relies upon an external visitor to
define how the object is travered (i.e. the :class:`.SQLCompiler`), the
:class:`.HasTraverseInternals` interface allows classes to define their own
traversal, that is, what attributes are accessed and in what order.
"""
__slots__ = ()
_traverse_internals: _TraverseInternalsType
_is_immutable: bool = False
@util.preload_module("sqlalchemy.sql.traversals")
def get_children(
self, *, omit_attrs: Tuple[str, ...] = (), **kw: Any
) -> Iterable[HasTraverseInternals]:
r"""Return immediate child :class:`.visitors.HasTraverseInternals`
elements of this :class:`.visitors.HasTraverseInternals`.
This is used for visit traversal.
\**kw may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
traversals = util.preloaded.sql_traversals
try:
traverse_internals = self._traverse_internals
except AttributeError:
# user-defined classes may not have a _traverse_internals
return []
dispatch = traversals._get_children.run_generated_dispatch
return itertools.chain.from_iterable(
meth(obj, **kw)
for attrname, obj, meth in dispatch(
self, traverse_internals, "_generated_get_children_traversal"
)
if attrname not in omit_attrs and obj is not None
)
| HasTraverseInternals |
python | getsentry__sentry | tests/sentry/middleware/test_subdomain.py | {
"start": 2919,
"end": 3401
} | class ____(Endpoint):
permission_classes = (AllowAny,)
def get(self, request):
# return HttpResponse(status=status.HTTP_200_OK)
return Response(
{
"subdomain": request.subdomain,
}
)
urlpatterns = [
re_path(
r"^api/0/test/$",
APITestEndpoint.as_view(),
name="test-endpoint",
),
]
@no_silo_test
@override_settings(ROOT_URLCONF=__name__, SENTRY_SELF_HOSTED=False)
| APITestEndpoint |
python | kamyu104__LeetCode-Solutions | Python/binary-searchable-numbers-in-an-unsorted-array.py | {
"start": 30,
"end": 546
} | class ____(object):
def binarySearchableNumbers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
right = [float("inf")]*(len(nums)+1)
for i in reversed(xrange(1, len(nums)+1)):
right[i-1] = min(right[i], nums[i-1])
result, left = set(), float("-inf")
for i in xrange(len(nums)):
if left <= nums[i] <= right[i+1]:
result.add(nums[i])
left = max(left, nums[i])
return len(result)
| Solution |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 27904,
"end": 30075
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList(
[
nn.Linear(
config.num_mel_bins if i == 0 else config.speech_decoder_prenet_units,
config.speech_decoder_prenet_units,
)
for i in range(config.speech_decoder_prenet_layers)
]
)
self.final_layer = nn.Linear(config.speech_decoder_prenet_units, config.hidden_size)
self.encode_positions = SpeechT5ScaledPositionalEncoding(
config.positional_dropout,
config.hidden_size,
config.max_speech_positions,
)
self.speaker_embeds_layer = nn.Linear(config.speaker_embedding_dim + config.hidden_size, config.hidden_size)
def _consistent_dropout(self, inputs_embeds, p):
mask = torch.bernoulli(inputs_embeds[0], p=p)
all_masks = mask.unsqueeze(0).repeat(inputs_embeds.size(0), 1, 1)
return torch.where(all_masks == 1, inputs_embeds, 0) * 1 / (1 - p)
def forward(
self,
input_values: torch.Tensor,
speaker_embeddings: Optional[torch.Tensor] = None,
):
# Dropout is always applied, even when evaluating. See §2.2 in https://huggingface.co/papers/1712.05884.
inputs_embeds = input_values
for layer in self.layers:
inputs_embeds = nn.functional.relu(layer(inputs_embeds))
inputs_embeds = self._consistent_dropout(inputs_embeds, self.config.speech_decoder_prenet_dropout)
inputs_embeds = self.final_layer(inputs_embeds)
inputs_embeds = self.encode_positions(inputs_embeds)
if speaker_embeddings is not None:
speaker_embeddings = nn.functional.normalize(speaker_embeddings)
speaker_embeddings = speaker_embeddings.unsqueeze(1).expand(-1, inputs_embeds.size(1), -1)
inputs_embeds = torch.cat([inputs_embeds, speaker_embeddings], dim=-1)
inputs_embeds = nn.functional.relu(self.speaker_embeds_layer(inputs_embeds))
return inputs_embeds
| SpeechT5SpeechDecoderPrenet |
python | numba__numba | numba/tests/test_array_constants.py | {
"start": 956,
"end": 4578
} | class ____(TestCase):
"""
Test array constants.
"""
def check_array_const(self, pyfunc):
cfunc = njit((types.int32,))(pyfunc)
for i in [0, 1, 2]:
np.testing.assert_array_equal(pyfunc(i), cfunc(i))
def test_array_const_0d(self):
self.check_array_const(getitem0)
def test_array_const_1d_contig(self):
self.check_array_const(getitem1)
def test_array_const_1d_noncontig(self):
self.check_array_const(getitem2)
def test_array_const_2d(self):
self.check_array_const(getitem3)
def test_record_array_const_contig(self):
self.check_array_const(getitem4)
def test_record_array_const_noncontig(self):
self.check_array_const(getitem5)
def test_array_const_alignment(self):
"""
Issue #1933: the array declaration in the LLVM IR must have
the right alignment specified.
"""
sig = (types.intp,)
cfunc = jit(sig, nopython=True)(getitem6)
ir = cfunc.inspect_llvm(sig)
for line in ir.splitlines():
if 'XXXX_array_contents_XXXX' in line:
self.assertIn("constant [24 x i8]", line) # sanity check
# Should be the ABI-required alignment for float32
# on most platforms...
self.assertIn(", align 4", line)
break
else:
self.fail("could not find array declaration in LLVM IR")
def test_arrayscalar_const(self):
pyfunc = use_arrayscalar_const
cfunc = njit((),)(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_write_to_global_array(self):
pyfunc = write_to_global_array
with self.assertRaises(TypingError):
njit((),)(pyfunc)
def test_issue_1850(self):
"""
This issue is caused by an unresolved bug in numpy since version 1.6.
See numpy GH issue #3147.
"""
constarr = np.array([86])
def pyfunc():
return constarr[0]
cfunc = njit((),)(pyfunc)
out = cfunc()
self.assertEqual(out, 86)
@TestCase.run_test_in_subprocess # isolate MCJIT use
def test_too_big_to_freeze(self):
"""
Test issue https://github.com/numba/numba/issues/2188 where freezing
a constant array into the code that's prohibitively long and consumes
too much RAM.
"""
def test(biggie):
expect = np.copy(biggie)
self.assertEqual(typeof(biggie), typeof(expect))
def pyfunc():
return biggie
cfunc = njit((),)(pyfunc)
# Check that the array is not frozen into the LLVM IR.
# LLVM size must be less than the array size.
self.assertLess(len(cfunc.inspect_llvm((),)), biggie.nbytes)
# Run and test result
out = cfunc()
self.assertIs(biggie, out)
# Remove all local references to biggie
del out
biggie = None # del biggie is syntax error in py2
# Run again and verify result
out = cfunc()
np.testing.assert_equal(expect, out)
self.assertEqual(typeof(expect), typeof(out))
nelem = 10**7 # 10 million items
c_array = np.arange(nelem).reshape(nelem)
f_array = np.asfortranarray(np.random.random((2, nelem // 2)))
self.assertEqual(typeof(c_array).layout, 'C')
self.assertEqual(typeof(f_array).layout, 'F')
# Test C contig
test(c_array)
# Test F contig
test(f_array)
| TestConstantArray |
python | PrefectHQ__prefect | src/prefect/infrastructure/provisioners/modal.py | {
"start": 771,
"end": 9142
} | class ____:
"""
A infrastructure provisioner for Modal push work pools.
"""
def __init__(self, client: Optional["PrefectClient"] = None):
self._console: Console = Console()
@property
def console(self) -> Console:
return self._console
@console.setter
def console(self, value: Console) -> None:
self._console = value
@staticmethod
def _is_modal_installed() -> bool:
"""
Checks if the modal package is installed.
Returns:
True if the modal package is installed, False otherwise
"""
try:
importlib.import_module("modal")
return True
except ModuleNotFoundError:
return False
async def _install_modal(self):
"""
Installs the modal package.
"""
with Progress(
SpinnerColumn(),
TextColumn("[bold blue]Installing modal..."),
transient=True,
console=self.console,
) as progress:
task = progress.add_task("modal install")
progress.start()
global modal
await ainstall_packages(["modal"])
modal = importlib.import_module("modal")
progress.advance(task)
async def _get_modal_token_id_and_secret(self) -> Tuple[str, str]:
"""
Gets a Model API token ID and secret from the current Modal configuration.
"""
modal_config = modal.config.Config()
modal_token_id = modal_config.get("token_id")
modal_token_secret = modal_config.get("token_secret")
return modal_token_id, modal_token_secret
async def _create_new_modal_token(self):
"""
Triggers a Modal login via the browser. Will create a new token in the default Modal profile.
"""
await run_process([shlex.quote(sys.executable), "-m", "modal", "token", "new"])
# Reload the modal.config module to pick up the new token
importlib.reload(modal.config)
async def _create_modal_credentials_block(
self,
block_document_name: str,
modal_token_id: str,
modal_token_secret: str,
client: "PrefectClient",
) -> BlockDocument:
"""
Creates a ModalCredentials block containing the provided token ID and secret.
Args:
block_document_name: The name of the block document to create
modal_token_id: The Modal token ID
modal_token_secret: The Modal token secret
Returns:
The ID of the created block
"""
assert client is not None, "client injection failed"
try:
credentials_block_type = await client.read_block_type_by_slug(
"modal-credentials"
)
except ObjectNotFound:
# Shouldn't happen, but just in case
raise RuntimeError(
"Unable to find ModalCredentials block type. Please ensure you are"
" using Prefect Cloud."
)
credentials_block_schema = (
await client.get_most_recent_block_schema_for_block_type(
block_type_id=credentials_block_type.id
)
)
assert credentials_block_schema is not None, (
f"Unable to find schema for block type {credentials_block_type.slug}"
)
block_doc = await client.create_block_document(
block_document=BlockDocumentCreate(
name=block_document_name,
data={
"token_id": modal_token_id,
"token_secret": modal_token_secret,
},
block_type_id=credentials_block_type.id,
block_schema_id=credentials_block_schema.id,
)
)
return block_doc
@inject_client
async def provision(
self,
work_pool_name: str,
base_job_template: Dict[str, Any],
client: Optional["PrefectClient"] = None,
) -> Dict[str, Any]:
"""
Provisions resources necessary for a Modal push work pool.
Provisioned resources:
- A ModalCredentials block containing a Modal API token
Args:
work_pool_name: The name of the work pool to provision resources for
base_job_template: The base job template to update
Returns:
A copy of the provided base job template with the provisioned resources
"""
credentials_block_name = f"{work_pool_name}-modal-credentials"
base_job_template_copy = deepcopy(base_job_template)
assert client is not None, "client injection failed"
try:
block_doc = await client.read_block_document_by_name(
credentials_block_name, "modal-credentials"
)
self.console.print(
f"Work pool [blue]{work_pool_name!r}[/] will reuse the existing Modal"
f" credentials block [blue]{credentials_block_name!r}[/blue]"
)
except ObjectNotFound:
if self._console.is_interactive and not Confirm.ask(
(
"To configure your Modal push work pool we'll need to store a Modal"
" token with Prefect Cloud as a block. We'll pull the token from"
" your local Modal configuration or create a new token if we"
" can't find one. Would you like to continue?"
),
console=self.console,
):
self.console.print(
"No problem! You can always configure your Modal push work pool"
" later via the Prefect UI."
)
return base_job_template
if not self._is_modal_installed():
if self.console.is_interactive and Confirm.ask(
(
"The [blue]modal[/] package is required to configure"
" authentication for your work pool. Would you like to install"
" it now?"
),
console=self.console,
):
await self._install_modal()
# Get the current Modal token ID and secret
(
modal_token_id,
modal_token_secret,
) = await self._get_modal_token_id_and_secret()
if not modal_token_id or not modal_token_secret:
# Create a new token one wasn't found
if self.console.is_interactive and Confirm.ask(
(
"Modal credentials not found. Would you like to create a new"
" token?"
),
console=self.console,
):
await self._create_new_modal_token()
(
modal_token_id,
modal_token_secret,
) = await self._get_modal_token_id_and_secret()
else:
raise RuntimeError(
"Modal credentials not found. Please create a new token by"
" running [blue]modal token new[/] and try again."
)
# Create the credentials block
with Progress(
SpinnerColumn(),
TextColumn("[bold blue]Saving Modal credentials..."),
transient=True,
console=self.console,
) as progress:
task = progress.add_task("create modal credentials block")
progress.start()
block_doc = await self._create_modal_credentials_block(
credentials_block_name,
modal_token_id,
modal_token_secret,
client=client,
)
progress.advance(task)
base_job_template_copy["variables"]["properties"]["modal_credentials"][
"default"
] = {"$ref": {"block_document_id": str(block_doc.id)}}
self.console.print(
f"Successfully configured Modal push work pool {work_pool_name!r}!",
style="green",
)
return base_job_template_copy
| ModalPushProvisioner |
python | kamyu104__LeetCode-Solutions | Python/make-the-string-great.py | {
"start": 29,
"end": 400
} | class ____(object):
def makeGood(self, s):
"""
:type s: str
:rtype: str
"""
stk = []
for ch in s:
counter_ch = ch.upper() if ch.islower() else ch.lower()
if stk and stk[-1] == counter_ch:
stk.pop()
else:
stk.append(ch)
return "".join(stk)
| Solution |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/cross_encoders/dataset_gen.py | {
"start": 500,
"end": 9797
} | class ____:
"""Class for keeping track of each item of Cross-Encoder training Dataset."""
query: str
context: str
score: int
DEFAULT_QUERY_GEN_SYSTEM_PROMPT = """You are Albert a Professor proficient in {qa_topic}.
You are working on creating {num_questions_per_chunk} questions.
You provide the questions such that such that each separate is separated by a semicolon ';' so that different questions can be easily separated by the python split function"""
DEFAULT_QUERY_GEN_USER_PROMPT = """Take a deep breath, read through the below provided document and then create {num_questions_per_chunk} questions and respond with the created questions such that each separate question is separated by a semicolon ';' so that different questions can be easily separated by the python split function.
Document: {context}"""
def generate_synthetic_queries_over_documents(
documents: List[Document],
num_questions_per_chunk: int = 5,
max_chunk_length: int = 3000,
qa_topic: str = "everything",
llm: Optional[LLM] = None,
qa_generate_system_msg: str = DEFAULT_QUERY_GEN_SYSTEM_PROMPT,
qa_generate_user_msg: str = DEFAULT_QUERY_GEN_USER_PROMPT,
) -> List[str]:
questions = []
node_parser = TokenTextSplitter(
separator=" ",
chunk_size=max_chunk_length,
chunk_overlap=0,
backup_separators=["\n"],
tokenizer=get_tokenizer(),
)
llm = llm or OpenAI(model="gpt-3.5-turbo-16k", temperature=0.3)
nodes = node_parser.get_nodes_from_documents(documents, show_progress=False)
node_dict = {
node.node_id: node.get_content(metadata_mode=MetadataMode.NONE)
for node in nodes
}
for node_id, text in tqdm(node_dict.items()):
system_msg = qa_generate_system_msg.format(
num_questions_per_chunk=num_questions_per_chunk, qa_topic=qa_topic
)
user_msg = qa_generate_user_msg.format(
num_questions_per_chunk=num_questions_per_chunk, context=text
)
messages = [
ChatMessage(role="system", content=system_msg),
ChatMessage(role="user", content=user_msg),
]
response = llm.chat(messages)
response_content: str = (
response.message.content if response.message.content is not None else ""
)
response_questions = re.split(";|\n", response_content)
response_questions = response_questions[:num_questions_per_chunk]
num_questions_generated = len(response_questions)
if num_questions_generated < num_questions_per_chunk:
warnings.warn(
f"Fewer questions generated ({num_questions_generated}) "
f"than requested ({num_questions_per_chunk})."
)
questions.extend(response_questions)
return questions
# Query-Doc relevance prompt taken from OpenAI cookbook:-
# https://github.com/openai/openai-cookbook/blob/main/examples/Search_reranking_with_cross-encoders.ipynb
DEFAULT_QUERY_DOC_RELEVANCE_PROMPT = '''You are an Assistant responsible for helping detect whether the retrieved document is relevant to the query. For a given input, you need to output a single token: "Yes" or "No" indicating the retrieved document is relevant to the query.
Query: How to plant a tree?
Document: """Cars were invented in 1886, when German inventor Carl Benz patented his Benz Patent-Motorwagen.[3][4][5] Cars became widely available during the 20th century. One of the first cars affordable by the masses was the 1908 Model T, an American car manufactured by the Ford Motor Company. Cars were rapidly adopted in the US, where they replaced horse-drawn carriages.[6] In Europe and other parts of the world, demand for automobiles did not increase until after World War II.[7] The car is considered an essential part of the developed economy."""
Relevant: No
Query: Has the coronavirus vaccine been approved?
Document: """The Pfizer-BioNTech COVID-19 vaccine was approved for emergency use in the United States on December 11, 2020."""
Relevant: Yes
Query: What is the capital of France?
Document: """Paris, France's capital, is a major European city and a global center for art, fashion, gastronomy and culture. Its 19th-century cityscape is crisscrossed by wide boulevards and the River Seine. Beyond such landmarks as the Eiffel Tower and the 12th-century, Gothic Notre-Dame cathedral, the city is known for its cafe culture and designer boutiques along the Rue du Faubourg Saint-Honoré."""
Relevant: Yes
Query: What are some papers to learn about PPO reinforcement learning?
Document: """Proximal Policy Optimization and its Dynamic Version for Sequence Generation: In sequence generation task, many works use policy gradient for model optimization to tackle the intractable backpropagation issue when maximizing the non-differentiable evaluation metrics or fooling the discriminator in adversarial learning. In this paper, we replace policy gradient with proximal policy optimization (PPO), which is a proved more efficient reinforcement learning algorithm, and propose a dynamic approach for PPO (PPO-dynamic). We demonstrate the efficacy of PPO and PPO-dynamic on conditional sequence generation tasks including synthetic experiment and chit-chat chatbot. The results show that PPO and PPO-dynamic can beat policy gradient by stability and performance."""
Relevant: Yes
Query: Explain sentence embeddings
Document: """Inside the bubble: exploring the environments of reionisation-era Lyman-α emitting galaxies with JADES and FRESCO: We present a study of the environments of 16 Lyman-α emitting galaxies (LAEs) in the reionisation era (5.8<z<8) identified by JWST/NIRSpec as part of the JWST Advanced Deep Extragalactic Survey (JADES). Unless situated in sufficiently (re)ionised regions, Lyman-α emission from these galaxies would be strongly absorbed by neutral gas in the intergalactic medium (IGM). We conservatively estimate sizes of the ionised regions required to reconcile the relatively low Lyman-α velocity offsets (ΔvLyα<300kms−1) with moderately high Lyman-α escape fractions (fesc,Lyα>5%) observed in our sample of LAEs, indicating the presence of ionised ``bubbles'' with physical sizes of the order of 0.1pMpc≲Rion≲1pMpc in a patchy reionisation scenario where the bubbles are embedded in a fully neutral IGM. Around half of the LAEs in our sample are found to coincide with large-scale galaxy overdensities seen in FRESCO at z∼5.8-5.9 and z∼7.3, suggesting Lyman-α transmission is strongly enhanced in such overdense regions, and underlining the importance of LAEs as tracers of the first large-scale ionised bubbles. Considering only spectroscopically confirmed galaxies, we find our sample of UV-faint LAEs (MUV≳−20mag) and their direct neighbours are generally not able to produce the required ionised regions based on the Lyman-α transmission properties, suggesting lower-luminosity sources likely play an important role in carving out these bubbles. These observations demonstrate the combined power of JWST multi-object and slitless spectroscopy in acquiring a unique view of the early stages of Cosmic Reionisation via the most distant LAEs."""
Relevant: No
Query: {query}
Document: """{document}"""
Relevant:
'''
def generate_ce_fine_tuning_dataset(
documents: List[Document],
questions_list: List[str],
max_chunk_length: int = 1000,
llm: Optional[LLM] = None,
qa_doc_relevance_prompt: str = DEFAULT_QUERY_DOC_RELEVANCE_PROMPT,
top_k: int = 8,
) -> List[CrossEncoderFinetuningDatasetSample]:
ce_dataset_list = []
node_parser = TokenTextSplitter(
separator=" ",
chunk_size=max_chunk_length,
chunk_overlap=0,
backup_separators=["\n"],
tokenizer=get_tokenizer(),
)
# Use logit bias in case of OpenAI for the tokens for Yes and No
# to decrease the likelihood of any other tokens occurring
llm = llm or OpenAI(
model="gpt-3.5-turbo-16k", temperature=0.1, logit_bias={9642: 1, 2822: 1}
)
nodes = node_parser.get_nodes_from_documents(documents, show_progress=False)
index = VectorStoreIndex(nodes)
retriever = index.as_retriever(similarity_top_k=top_k)
for question in tqdm(questions_list):
if question != "":
retrieved_nodes = retriever.retrieve(question)
for node in retrieved_nodes:
node_content = node.get_text()
msg_prompt = qa_doc_relevance_prompt.format(
query=question, document=node_content
)
response = llm.complete(msg_prompt)
result = response.text.strip().lower()
if result == "yes":
question_row = CrossEncoderFinetuningDatasetSample(
query=question, context=node_content, score=1
)
ce_dataset_list.append(question_row)
elif result == "no":
question_row = CrossEncoderFinetuningDatasetSample(
query=question, context=node_content, score=0
)
ce_dataset_list.append(question_row)
else:
pass
return ce_dataset_list
| CrossEncoderFinetuningDatasetSample |
python | Netflix__metaflow | metaflow/plugins/azure/azure_secret_manager_secrets_provider.py | {
"start": 627,
"end": 762
} | class ____(MetaflowException):
"""Raised when the secret path does not match to expected length"""
| MetaflowAzureKeyVaultBadSecretPath |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/call_graph.py | {
"start": 925,
"end": 2052
} | class ____:
args: tuple[object, ...] | Mapping[str, object] | None = None
def __init__(self) -> None: ...
def is_dict(obj: object) -> TypeGuard[dict[object, object]]:
return isinstance(obj, dict)
def test_chained_assign_subscript(record: LogRecord):
if is_dict(record.args) and "headers" in record.args and is_dict(record.args["headers"]):
headers = record.args["headers"] = {**record.args["headers"]} # pyre-ignore
# Treated as:
# ```
# headers = {**record.args["headers"]}
# record.args["headers"] = {**record.args["headers"]}
# ```
# This is a corner case where the type resolution leads to different
# callees in the right hand side expression
def test_localized_target():
if 1 < 2:
f = lambda: None
else:
def f() -> None:
return
f()
T = TypeVar("T")
def no_op_decorator_factory(x: int) -> Callable[[T], T]: # pyre-ignore
def inner(original: T) -> T:
setattr(original, "foo", "bar")
return original
return inner
def no_op_decorator(f: T) -> T:
return f
| LogRecord |
python | ray-project__ray | doc/source/_ext/callouts.py | {
"start": 4254,
"end": 5307
} | class ____(SphinxDirective):
"""Annotations directive, which is only used nested within a Callout directive."""
has_content = True
def run(self):
content = self.content
content = _replace_numbers(content)
joined_content = "\n".join(content)
annotations_node = callout(joined_content)
_parse_recursively(self, annotations_node)
return [annotations_node]
def setup(app):
# Add new node types
app.add_node(
callout,
html=(visit_callout_node, depart_callout_node),
latex=(visit_callout_node, depart_callout_node),
text=(visit_callout_node, depart_callout_node),
)
app.add_node(annotations)
# Add new directives
app.add_directive("callout", CalloutDirective)
app.add_directive("annotations", AnnotationsDirective)
# Add post-processor
app.add_post_transform(CalloutIncludePostTransform)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| AnnotationsDirective |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/bedrock.py | {
"start": 40185,
"end": 42318
} | class ____(AwsBaseOperator[BedrockAgentRuntimeHook]):
"""
Query a knowledge base and retrieve results with source citations.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BedrockRetrieveOperator`
:param retrieval_query: The query to be made to the knowledge base. (templated)
:param knowledge_base_id: The unique identifier of the knowledge base that is queried. (templated)
:param vector_search_config: How the results from the vector search should be returned. (templated)
For more information, see https://docs.aws.amazon.com/bedrock/latest/userguide/kb-test-config.html.
:param retrieve_kwargs: Additional keyword arguments to pass to the API call. (templated)
"""
aws_hook_class = BedrockAgentRuntimeHook
template_fields: Sequence[str] = aws_template_fields(
"retrieval_query",
"knowledge_base_id",
"vector_search_config",
"retrieve_kwargs",
)
def __init__(
self,
retrieval_query: str,
knowledge_base_id: str,
vector_search_config: dict[str, Any] | None = None,
retrieve_kwargs: dict[str, Any] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.retrieval_query = retrieval_query
self.knowledge_base_id = knowledge_base_id
self.vector_search_config = vector_search_config
self.retrieve_kwargs = retrieve_kwargs or {}
def execute(self, context: Context) -> Any:
retrieval_configuration = (
{"retrievalConfiguration": {"vectorSearchConfiguration": self.vector_search_config}}
if self.vector_search_config
else {}
)
result = self.hook.conn.retrieve(
retrievalQuery={"text": self.retrieval_query},
knowledgeBaseId=self.knowledge_base_id,
**retrieval_configuration,
**self.retrieve_kwargs,
)
self.log.info("\nQuery: %s\nRetrieved: %s", self.retrieval_query, result["retrievalResults"])
return result
| BedrockRetrieveOperator |
python | spack__spack | var/spack/test_repos/spack_repo/tutorial/packages/elpa/package.py | {
"start": 228,
"end": 2753
} | class ____(AutotoolsPackage):
"""Eigenvalue solvers for Petaflop-Applications (ELPA)"""
homepage = "http://elpa.mpcdf.mpg.de/"
url = "http://elpa.mpcdf.mpg.de/elpa-2015.11.001.tar.gz"
version("2018.05.001.rc1", md5="ccd77bd8036988ee624f43c04992bcdd")
version("2017.11.001", md5="4a437be40cc966efb07aaab84c20cd6e", preferred=True)
version("2017.05.003", md5="7c8e5e58cafab212badaf4216695700f")
version("2017.05.002", md5="d0abc1ac1f493f93bf5e30ec8ab155dc")
version("2016.11.001.pre", md5="5656fd066cf0dcd071dbcaf20a639b37")
version("2016.05.004", md5="c0dd3a53055536fc3a2a221e78d8b376")
version("2016.05.003", md5="88a9f3f3bfb63e16509dd1be089dcf2c")
version("2015.11.001", md5="de0f35b7ee7c971fd0dca35c900b87e6")
variant("openmp", default=False, description="Activates OpenMP support")
variant("optflags", default=True, description="Build with optimization flags")
depends_on("mpi")
depends_on("blas")
depends_on("lapack")
depends_on("scalapack")
def url_for_version(self, version):
t = "http://elpa.mpcdf.mpg.de/html/Releases/{0}/elpa-{0}.tar.gz"
if version < Version("2016.05.003"):
t = "http://elpa.mpcdf.mpg.de/elpa-{0}.tar.gz"
return t.format(str(version))
@property
def libs(self):
libname = "libelpa_openmp" if "+openmp" in self.spec else "libelpa"
return find_libraries(libname, root=self.prefix, shared=True, recursive=True)
build_directory = "spack-build"
def setup_run_environment(self, env: EnvironmentModifications) -> None:
# TUTORIAL: set the following environment variables:
#
# CC=spec['mpi'].mpicc
# FC=spec['mpi'].mpifc
# CXX=spec['mpi'].mpicxx
# SCALAPACK_LDFLAGS=spec['scalapack'].libs.joined()
#
# and append the following flags:
#
# LDFLAGS -> spec['lapack'].libs.search_flags
# LIBS -> spec['lapack'].libs.link_flags
pass
def configure_args(self):
# TODO: set optimum flags for platform+compiler combo, see
# https://github.com/hfp/xconfigure/tree/master/elpa
# also see:
# https://src.fedoraproject.org/cgit/rpms/elpa.git/
# https://packages.qa.debian.org/e/elpa.html
options = []
if "+optflags" in self.spec:
options.extend(["FCFLAGS=-O2 -ffree-line-length-none", "CFLAGS=-O2"])
if "+openmp" in self.spec:
options.append("--enable-openmp")
return options
| Elpa |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.