language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/execution/retries.py
|
{
"start": 2014,
"end": 6363
}
|
class ____:
def __init__(self, previous_attempts: Optional[Mapping[str, int]] = None):
self._attempts = defaultdict(int)
for key, val in check.opt_mapping_param(
previous_attempts, "previous_attempts", key_type=str, value_type=int
).items():
self._attempts[key] = val
def get_attempt_count(self, key: str) -> int:
return self._attempts[key]
def mark_attempt(self, key: str) -> None:
self._attempts[key] += 1
def snapshot_attempts(self) -> Mapping[str, int]:
return dict(self._attempts)
def auto_reexecution_should_retry_run(
instance: "DagsterInstance", run: "DagsterRun", run_failure_reason: Optional["RunFailureReason"]
):
"""Determines if a run will be retried by the automatic reexcution system.
A run will retry if:
- it is failed.
- the number of max allowed retries is > 0 (max retries can be set via system setting or run tag).
- there have not already been >= max_retries retries for the run.
If the run failure reason was a step failure and the retry_on_asset_or_op_failure tag/system setting is set to false,
a warning message will be logged and the run will not be retried.
We determine how many retries have been launched for the run by looking at the size of the run group
(the set of runs that have the same root_run_id and the run with root_run_id). Since manually launched retries are
part of the run group, this means that if a user launches a manual retry of run A and then this function
is called because a retry for run A launched by the auto-reexecution system failed, the manual retry will be
counted toward max_retries.
It is unlikely, but possible, that one "extra" retry will be launched by the automatic reexecution system
since manual retries could be happening in parallel with automatic retries. Here is
an illustrative example:
- Max retries is 3
- Run A fails
- The automatic reexecution system launches a retry of run A (A_1), which fails
- The automatic reexecution system launches a retry run A_1 (A_2), which fails
- This function is executing and has fetched the run_group for run A_2: (A, A_1, A_2)
- A user launches a manual retry of run A (A_m). The run group is now (A, A_1, A_2, A_m), but this function does
not have the updated run group
- Since the run group we've fetched is (A, A_1, A_2), this function will mark A_2 as `will_retry=true` and
run `A_3` will be launched. This is the "extra" retry, since usually manual retries are counted toward max_retries, but
in this case it was not.
We think this is an acceptable tradeoff to make since the automatic reexecution system won't launch more than max_retries
run itself, just that max_retries + 1 runs could be launched in total if a manual retry is timed to cause this condition (unlikely).
"""
from dagster._core.events import RunFailureReason
from dagster._core.storage.dagster_run import DagsterRunStatus
if run.status != DagsterRunStatus.FAILURE:
return False
retry_on_asset_or_op_failure = get_boolean_tag_value(
run.tags.get(RETRY_ON_ASSET_OR_OP_FAILURE_TAG),
default_value=instance.run_retries_retry_on_asset_or_op_failure,
)
if run_failure_reason == RunFailureReason.STEP_FAILURE and not retry_on_asset_or_op_failure:
return False
raw_max_retries_tag = run.tags.get(MAX_RETRIES_TAG)
if raw_max_retries_tag is None:
max_retries = instance.run_retries_max_retries
else:
try:
max_retries = int(raw_max_retries_tag)
except ValueError:
warnings.warn(f"Error parsing int from tag {MAX_RETRIES_TAG}, won't retry the run.")
return False
if max_retries > 0:
try:
run_group = instance.get_run_group(run.run_id)
except DagsterRunNotFoundError:
# can happen if either this run or the root run in the run group was deleted
return False
if run_group is not None:
_, run_group_iter = run_group
# since the original run is in the run group, the number of retries launched
# so far is len(run_group_iter) - 1
if len(list(run_group_iter)) <= max_retries:
return True
return False
|
RetryState
|
python
|
numba__numba
|
numba/tests/test_parfors_passes.py
|
{
"start": 589,
"end": 1011
}
|
class ____(object):
def __init__(self, typingctx, targetctx, args, test_ir):
self.state = compiler.StateDict()
self.state.typingctx = typingctx
self.state.targetctx = targetctx
self.state.args = args
self.state.func_ir = test_ir
self.state.typemap = None
self.state.return_type = None
self.state.calltypes = None
self.state.metadata = {}
|
MyPipeline
|
python
|
apache__airflow
|
providers/ftp/src/airflow/providers/ftp/hooks/ftp.py
|
{
"start": 9857,
"end": 11066
}
|
class ____(FTPHook):
"""Interact with FTPS."""
def get_conn(self) -> ftplib.FTP:
"""Return an FTPS connection object."""
import ssl
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
encoding = params.extra_dejson.get("encoding")
self.encoding = encoding
if params.port:
ftplib.FTP_TLS.port = params.port
# Construct FTP_TLS instance with SSL context to allow certificates to be validated by default
context = ssl.create_default_context()
params.host = cast("str", params.host)
params.password = cast("str", params.password)
params.login = cast("str", params.login)
if encoding:
self.conn = ftplib.FTP_TLS(
params.host, params.login, params.password, context=context, encoding=encoding
) # nosec: B321
else:
self.conn = ftplib.FTP_TLS(params.host, params.login, params.password, context=context) # nosec: B321
self.conn.set_pasv(pasv)
return self.conn
|
FTPSHook
|
python
|
getsentry__sentry
|
src/sentry/snuba/snuba_query_validator.py
|
{
"start": 2361,
"end": 17211
}
|
class ____(BaseDataSourceValidator[QuerySubscription]):
query_type = serializers.IntegerField(required=False)
dataset = serializers.CharField(required=True)
query = serializers.CharField(required=True, allow_blank=True)
aggregate = serializers.CharField(required=True)
time_window = serializers.IntegerField(required=True)
environment = EnvironmentField(required=True, allow_null=True)
event_types = serializers.ListField(
child=serializers.CharField(),
)
group_by = serializers.ListField(
child=serializers.CharField(allow_blank=False, max_length=200),
required=False,
allow_empty=False,
)
extrapolation_mode = serializers.CharField(required=False, allow_null=True)
class Meta:
model = QuerySubscription
fields = [
"query_type",
"dataset",
"query",
"aggregate",
"time_window",
"environment",
"event_types",
"group_by",
"extrapolation_mode",
]
data_source_type_handler = QuerySubscriptionDataSourceHandler
def __init__(self, *args, timeWindowSeconds=False, **kwargs):
super().__init__(*args, **kwargs)
# if true, time_window is interpreted as seconds.
# if false, time_window is interpreted as minutes.
# TODO: only accept time_window in seconds once AlertRuleSerializer is removed
self.time_window_seconds = timeWindowSeconds
def validate_aggregate(self, aggregate: str) -> str:
"""
Reject upsampled_count() as user input. This function is reserved for internal use
and will be applied automatically when appropriate. Users should specify count().
"""
if aggregate == "upsampled_count()":
raise serializers.ValidationError(
"upsampled_count() is not allowed as user input. Use count() instead - "
"it will be automatically converted to upsampled_count() when appropriate."
)
return aggregate
def validate_query_type(self, value: int) -> SnubaQuery.Type:
try:
return SnubaQuery.Type(value)
except ValueError:
raise serializers.ValidationError(f"Invalid query type {value}")
def validate_dataset(self, value: str) -> Dataset:
try:
dataset_value = Dataset(value)
if dataset_value in [Dataset.PerformanceMetrics, Dataset.Transactions]:
return self._validate_performance_dataset(dataset_value)
return dataset_value
except ValueError:
raise serializers.ValidationError(
"Invalid dataset, valid values are %s" % [item.value for item in Dataset]
)
def validate_query(self, query: str):
query_terms = query.split()
for query_term in query_terms:
if query_term in UNSUPPORTED_QUERIES:
raise serializers.ValidationError(
f"Unsupported Query: We do not currently support the {query_term} query"
)
return query
def validate_event_types(self, value: Sequence[str]) -> list[SnubaQueryEventType.EventType]:
try:
validated = [SnubaQueryEventType.EventType[event_type.upper()] for event_type in value]
except KeyError:
raise serializers.ValidationError(
"Invalid event_type, valid values are %s"
% [item.name.lower() for item in SnubaQueryEventType.EventType]
)
if not is_logs_enabled(
self.context["organization"], actor=self.context.get("user", None)
) and any([v for v in validated if v == SnubaQueryEventType.EventType.TRACE_ITEM_LOG]):
raise serializers.ValidationError("You do not have access to the log alerts feature.")
return validated
def validate_extrapolation_mode(self, extrapolation_mode: str) -> ExtrapolationMode | None:
if extrapolation_mode is not None:
extrapolation_mode_enum = ExtrapolationMode.from_str(extrapolation_mode)
if extrapolation_mode_enum is None:
raise serializers.ValidationError(
f"Invalid extrapolation mode: {extrapolation_mode}"
)
return extrapolation_mode_enum
def validate(self, data):
data = super().validate(data)
self._validate_aggregate(data)
self._validate_query(data)
data["group_by"] = self._validate_group_by(data.get("group_by"))
query_type = data["query_type"]
if query_type == SnubaQuery.Type.CRASH_RATE:
data["event_types"] = []
event_types = data.get("event_types")
valid_event_types = QUERY_TYPE_VALID_EVENT_TYPES.get(query_type, set())
if event_types and set(event_types) - valid_event_types:
raise serializers.ValidationError(
"Invalid event types for this dataset. Valid event types are %s"
% sorted(et.name.lower() for et in valid_event_types)
)
dataset = data.get("dataset")
if dataset == Dataset.EventsAnalyticsPlatform and event_types and len(event_types) > 1:
raise serializers.ValidationError(
"Multiple event types not allowed. Valid event types are %s"
% sorted(et.name.lower() for et in valid_event_types)
)
return data
def _validate_aggregate(self, data):
dataset = data.setdefault("dataset", Dataset.Events)
aggregate = data.get("aggregate")
allow_mri = features.has(
"organizations:custom-metrics",
self.context["organization"],
actor=self.context.get("user", None),
) or features.has(
"organizations:insights-alerts",
self.context["organization"],
actor=self.context.get("user", None),
)
allow_eap = dataset == Dataset.EventsAnalyticsPlatform
try:
if not check_aggregate_column_support(
aggregate,
allow_mri=allow_mri,
allow_eap=allow_eap,
):
raise serializers.ValidationError(
{"aggregate": _("Invalid Metric: We do not currently support this field.")}
)
except InvalidSearchQuery as e:
raise serializers.ValidationError({"aggregate": _(f"Invalid Metric: {e}")})
data["aggregate"] = translate_aggregate_field(
aggregate, allow_mri=allow_mri, allow_eap=allow_eap
)
def _validate_query(self, data):
dataset = data.setdefault("dataset", Dataset.Events)
if features.has(
"organizations:custom-metrics",
self.context["organization"],
actor=self.context.get("user", None),
) or features.has(
"organizations:insights-alerts",
self.context["organization"],
actor=self.context.get("user", None),
):
column = get_column_from_aggregate(
data["aggregate"],
allow_mri=True,
allow_eap=dataset == Dataset.EventsAnalyticsPlatform,
)
if is_mri(column) and dataset != Dataset.PerformanceMetrics:
raise serializers.ValidationError(
"You can use an MRI only on alerts on performance metrics"
)
query_type = data.setdefault("query_type", query_datasets_to_type[dataset])
valid_datasets = QUERY_TYPE_VALID_DATASETS[query_type]
if dataset not in valid_datasets:
raise serializers.ValidationError(
"Invalid dataset for this query type. Valid datasets are %s"
% sorted(dataset.name.lower() for dataset in valid_datasets)
)
if (
not features.has(
"organizations:mep-rollout-flag",
self.context["organization"],
actor=self.context.get("user", None),
)
and dataset == Dataset.PerformanceMetrics
and query_type == SnubaQuery.Type.PERFORMANCE
):
raise serializers.ValidationError(
"This project does not have access to the `generic_metrics` dataset"
)
projects = data.get("projects")
if not projects:
# We just need a valid project id from the org so that we can verify
# the query. We don't use the returned data anywhere, so it doesn't
# matter which.
projects = list(self.context["organization"].project_set.all()[:1])
try:
entity_subscription = get_entity_subscription(
query_type,
dataset=dataset,
aggregate=data["aggregate"],
time_window=data["time_window"],
extra_fields={
"org_id": projects[0].organization_id,
"event_types": data.get("event_types"),
"extrapolation_mode": data.get("extrapolation_mode"),
},
)
except UnsupportedQuerySubscription as e:
raise serializers.ValidationError(f"{e}")
# TODO(edward): Bypass snql query validation for EAP queries. Do we need validation for rpc requests?
if dataset != Dataset.EventsAnalyticsPlatform:
self._validate_snql_query(data, entity_subscription, projects)
def _validate_snql_query(self, data, entity_subscription, projects):
end = timezone.now()
start = end - timedelta(minutes=10)
try:
query_builder = entity_subscription.build_query_builder(
query=data["query"],
project_ids=[p.id for p in projects],
environment=data.get("environment"),
params={
"organization_id": projects[0].organization_id,
"project_id": [p.id for p in projects],
"start": start,
"end": end,
},
)
except (InvalidSearchQuery, ValueError, IncompatibleMetricsQuery) as e:
raise serializers.ValidationError(f"Invalid Query or Metric: {e}")
if not query_builder.are_columns_resolved():
raise serializers.ValidationError(
"Invalid Metric: Please pass a valid function for aggregation"
)
dataset = Dataset(data["dataset"].value)
self._validate_time_window(data.get("time_window"), dataset)
entity = Entity(Dataset.Events.value, alias=Dataset.Events.value)
time_col = ENTITY_TIME_COLUMNS[get_entity_key_from_query_builder(query_builder)]
query_builder.add_conditions(
[
Condition(Column(time_col, entity=entity), Op.GTE, start),
Condition(Column(time_col, entity=entity), Op.LT, end),
]
)
query_builder.limit = Limit(1)
try:
query_builder.run_query(referrer="alertruleserializer.test_query")
except Exception:
logger.exception("Error while validating snuba alert rule query")
raise serializers.ValidationError(
"Invalid Query or Metric: An error occurred while attempting " "to run the query"
)
def _validate_time_window(self, value: int, dataset: Dataset):
time_window_seconds = value * 60 if not self.time_window_seconds else value
if dataset == Dataset.Metrics:
if time_window_seconds not in CRASH_RATE_ALERTS_ALLOWED_TIME_WINDOWS:
raise serializers.ValidationError(
"Invalid Time Window: Allowed time windows for crash rate alerts are: "
"30min, 1h, 2h, 4h, 12h and 24h"
)
if dataset == Dataset.EventsAnalyticsPlatform:
if time_window_seconds < 300:
raise serializers.ValidationError(
"Invalid Time Window: Time window for this alert type must be at least 5 minutes."
)
return time_window_seconds
def _validate_performance_dataset(self, dataset):
if dataset != Dataset.Transactions:
return dataset
has_dynamic_sampling = features.has(
"organizations:dynamic-sampling", self.context["organization"]
)
has_performance_metrics_flag = features.has(
"organizations:mep-rollout-flag", self.context["organization"]
)
has_performance_metrics = has_dynamic_sampling and has_performance_metrics_flag
has_on_demand_metrics = features.has(
"organizations:on-demand-metrics-extraction",
self.context["organization"],
)
if has_performance_metrics or has_on_demand_metrics:
raise serializers.ValidationError(
"Performance alerts must use the `generic_metrics` dataset"
)
return dataset
def _validate_group_by(self, value: Sequence[str] | None) -> Sequence[str] | None:
if value is None:
return None
if not features.has(
"organizations:workflow-engine-metric-alert-group-by-creation",
self.context["organization"],
actor=self.context.get("user", None),
):
raise serializers.ValidationError(
"Group by Metric Alerts feature must be enabled to use this field"
)
if len(value) > 100:
raise serializers.ValidationError("Group by must be 100 or fewer items")
# group by has to be unique list of strings
if len(value) != len(set(value)):
raise serializers.ValidationError("Group by must be a unique list of strings")
# TODO:
# validate that group by is a valid snql / EAP column?
return value
@override
def create_source(self, validated_data) -> QuerySubscription:
snuba_query = create_snuba_query(
query_type=validated_data["query_type"],
dataset=validated_data["dataset"],
query=validated_data["query"],
aggregate=validated_data["aggregate"],
time_window=timedelta(seconds=validated_data["time_window"]),
resolution=timedelta(minutes=1),
environment=validated_data["environment"],
event_types=validated_data["event_types"],
group_by=validated_data.get("group_by"),
extrapolation_mode=validated_data.get("extrapolation_mode"),
)
return create_snuba_subscription(
project=self.context["project"],
subscription_type=INCIDENTS_SNUBA_SUBSCRIPTION_TYPE,
snuba_query=snuba_query,
)
|
SnubaQueryValidator
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/battlenet/views.py
|
{
"start": 662,
"end": 2059
}
|
class ____:
APAC = "apac"
CN = "cn"
EU = "eu"
KR = "kr"
SEA = "sea"
TW = "tw"
US = "us"
def _check_errors(response):
try:
data = response.json()
except ValueError: # JSONDecodeError on py3
raise OAuth2Error("Invalid JSON from Battle.net API: %r" % (response.text))
if response.status_code >= HTTPStatus.BAD_REQUEST or "error" in data:
# For errors, we expect the following format:
# {"error": "error_name", "error_description": "Oops!"}
# For example, if the token is not valid, we will get:
# {
# "error": "invalid_token",
# "error_description": "Invalid access token: abcdef123456"
# }
# For the profile API, this may also look like the following:
# {"code": 403, "type": "Forbidden", "detail": "Account Inactive"}
error = data.get("error", "") or data.get("type", "")
desc = data.get("error_description", "") or data.get("detail", "")
raise OAuth2Error("Battle.net error: %s (%s)" % (error, desc))
# The expected output from the API follows this format:
# {"id": 12345, "battletag": "Example#12345"}
# The battletag is optional.
if "id" not in data:
# If the id is not present, the output is not usable (no UID)
raise OAuth2Error("Invalid data from Battle.net API: %r" % (data))
return data
|
Region
|
python
|
networkx__networkx
|
networkx/algorithms/isomorphism/vf2userfunc.py
|
{
"start": 4736,
"end": 7165
}
|
class ____(vf2.DiGraphMatcher):
"""VF2 isomorphism checker for directed graphs."""
def __init__(self, G1, G2, node_match=None, edge_match=None):
"""Initialize graph matcher.
Parameters
----------
G1, G2 : graph
The graphs to be tested.
node_match : callable
A function that returns True iff node n1 in G1 and n2 in G2
should be considered equal during the isomorphism test. The
function will be called like::
node_match(G1.nodes[n1], G2.nodes[n2])
That is, the function will receive the node attribute dictionaries
of the nodes under consideration. If None, then no attributes are
considered when testing for an isomorphism.
edge_match : callable
A function that returns True iff the edge attribute dictionary for
the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
considered equal during the isomorphism test. The function will be
called like::
edge_match(G1[u1][v1], G2[u2][v2])
That is, the function will receive the edge attribute dictionaries
of the edges under consideration. If None, then no attributes are
considered when testing for an isomorphism.
"""
vf2.DiGraphMatcher.__init__(self, G1, G2)
self.node_match = node_match
self.edge_match = edge_match
# These will be modified during checks to minimize code repeat.
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
# Test node_match and also test edge_match on successors
feasible = _semantic_feasibility(self, G1_node, G2_node)
if not feasible:
return False
# Test edge_match on predecessors
self.G1_adj = self.G1.pred
self.G2_adj = self.G2.pred
feasible = _semantic_feasibility(self, G1_node, G2_node)
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
return feasible
# The "semantics" of edge_match are different for multi(di)graphs, but
# the implementation is the same. So, technically we do not need to
# provide "multi" versions, but we do so to match NetworkX's base classes.
|
DiGraphMatcher
|
python
|
jackfrued__Python-100-Days
|
Day31-35/code/example04.py
|
{
"start": 53,
"end": 1021
}
|
class ____(object):
"""物品"""
def __init__(self, name, price, weight):
self.name = name
self.price = price
self.weight = weight
@property
def value(self):
"""价格重量比"""
return self.price / self.weight
def input_thing():
"""输入物品信息"""
name_str, price_str, weight_str = input().split()
return name_str, int(price_str), int(weight_str)
def main():
"""主函数"""
max_weight, num_of_things = map(int, input().split())
all_things = []
for _ in range(num_of_things):
all_things.append(Thing(*input_thing()))
all_things.sort(key=lambda x: x.value, reverse=True)
total_weight = 0
total_price = 0
for thing in all_things:
if total_weight + thing.weight <= max_weight:
print(f'小偷拿走了{thing.name}')
total_weight += thing.weight
total_price += thing.price
print(f'总价值: {total_price}美元')
if __name__ == '__main__':
main()
|
Thing
|
python
|
fluentpython__example-code-2e
|
19-concurrency/primes/threads.py
|
{
"start": 306,
"end": 1931
}
|
class ____(NamedTuple):
n: int
prime: bool
elapsed: float
JobQueue = SimpleQueue[int] # <4>
ResultQueue = SimpleQueue[PrimeResult] # <5>
def check(n: int) -> PrimeResult: # <6>
t0 = perf_counter()
res = is_prime(n)
return PrimeResult(n, res, perf_counter() - t0)
def worker(jobs: JobQueue, results: ResultQueue) -> None: # <7>
while n := jobs.get(): # <8>
results.put(check(n)) # <9>
results.put(PrimeResult(0, False, 0.0))
def start_jobs(workers: int, jobs: JobQueue, results: ResultQueue) -> None:
for n in NUMBERS: # <3>
jobs.put(n)
for _ in range(workers):
proc = Thread(target=worker, args=(jobs, results)) # <4>
proc.start() # <5>
jobs.put(0) # <6>
def report(workers: int, results: ResultQueue) -> int:
checked = 0
workers_done = 0
while workers_done < workers:
n, prime, elapsed = results.get()
if n == 0:
workers_done += 1
else:
checked += 1
label = 'P' if prime else ' '
print(f'{n:16} {label} {elapsed:9.6f}s')
return checked
def main() -> None:
if len(sys.argv) < 2:
workers = os.cpu_count()
else:
workers = int(sys.argv[1])
print(f'Checking {len(NUMBERS)} numbers with {workers} threads:')
t0 = perf_counter()
jobs: JobQueue = SimpleQueue()
results: ResultQueue = SimpleQueue()
start_jobs(workers, jobs, results)
checked = report(workers, results)
elapsed = perf_counter() - t0
print(f'{checked} checks in {elapsed:.2f}s')
if __name__ == '__main__':
main()
|
PrimeResult
|
python
|
joblib__joblib
|
joblib/externals/loky/backend/synchronize.py
|
{
"start": 5958,
"end": 6808
}
|
class ____(SemLock):
def __init__(self):
super().__init__(RECURSIVE_MUTEX, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != "MainThread":
name = f"{name}|{threading.current_thread().name}"
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = "None", 0
elif self._semlock._count() > 0:
name, count = "SomeOtherThread", "nonzero"
else:
name, count = "SomeOtherProcess", "nonzero"
except Exception:
name, count = "unknown", "unknown"
return f"<{self.__class__.__name__}({name}, {count})>"
#
# Condition variable
#
|
RLock
|
python
|
scrapy__scrapy
|
scrapy/exceptions.py
|
{
"start": 1667,
"end": 1880
}
|
class ____(Exception):
"""To indicate a command-line usage error"""
def __init__(self, *a: Any, **kw: Any):
self.print_help = kw.pop("print_help", True)
super().__init__(*a, **kw)
|
UsageError
|
python
|
ray-project__ray
|
release/llm_tests/serve/probes/query_utils.py
|
{
"start": 3118,
"end": 4849
}
|
class ____:
def __init__(self, response=List[BaseModel]):
self.response = response
def messages(self):
"""In case of streamed response, what are the individual chunked messages? that contain the content we care about?"""
vals = []
for r in self.response:
if len(r.choices) == 0:
continue
v = r.choices[0].model_dump()
if "message" in v and "content" in v["message"]:
vals.append(v["message"]["content"] or "")
elif "delta" in v and "content" in v["delta"]:
vals.append(v["delta"]["content"] or "")
return vals
def messages_dicts(self):
vals = []
for r in self.response:
for choice in r.choices:
vals.append(choice.model_dump())
return vals
def full_dict(self):
messages_dicts = self.messages_dicts()
return apply_delta_changes(messages_dicts)
def full(self) -> str:
"""In case of streamed response, what is the full response by concatenating individual responses?"""
return "".join(self.messages())
def num_completion_tokens(self):
# Usage is set on the last element in the stream
try:
return self.response[-1].usage.completion_tokens
except AttributeError:
return self.response[-1].usage.get("completion_tokens")
def finish_reason(self):
# This should be set on the last response.
for chunk in reversed(self.response):
if len(chunk.choices) > 0:
if chunk.choices[0].finish_reason:
return chunk.choices[0].finish_reason
return None
|
TextGenerationProbeResponse
|
python
|
coleifer__peewee
|
tests/sqlite.py
|
{
"start": 1333,
"end": 2236
}
|
class ____(object):
def __init__(self):
self.total = 0.
self.count = 0.
def step(self, value, weight=None):
weight = weight or 1.
self.total += weight
self.count += (weight * value)
def finalize(self):
if self.total != 0.:
return self.count / self.total
return 0.
def _cmp(l, r):
if l < r:
return -1
return 1 if r < l else 0
def collate_reverse(s1, s2):
return -_cmp(s1, s2)
@database.collation()
def collate_case_insensitive(s1, s2):
return _cmp(s1.lower(), s2.lower())
def title_case(s): return s.title()
@database.func()
def rstrip(s, n):
return s.rstrip(n)
database.register_aggregate(WeightedAverage, 'weighted_avg', 1)
database.register_aggregate(WeightedAverage, 'weighted_avg2', 2)
database.register_collation(collate_reverse)
database.register_function(title_case)
|
WeightedAverage
|
python
|
doocs__leetcode
|
solution/1100-1199/1109.Corporate Flight Bookings/Solution.py
|
{
"start": 0,
"end": 297
}
|
class ____:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
ans = [0] * n
for first, last, seats in bookings:
ans[first - 1] += seats
if last < n:
ans[last] -= seats
return list(accumulate(ans))
|
Solution
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/data_connector/s3_data_connector.py
|
{
"start": 959,
"end": 11523
}
|
class ____(FilePathDataConnector):
"""Extension of FilePathDataConnector used to connect to S3.
Args:
datasource_name: The name of the Datasource associated with this DataConnector instance
data_asset_name: The name of the DataAsset using this DataConnector instance
s3_client: Reference to instantiated AWS S3 client handle
bucket (str): bucket for S3
prefix (str): S3 prefix
delimiter (str): S3 delimiter
max_keys (int): S3 max_keys (default is 1000)
recursive_file_discovery (bool): Flag to indicate if files should be searched recursively from subfolders
file_path_template_map_fn: Format function mapping path to fully-qualified resource on S3
whole_directory_path_override: If present, treat entire directory as single Asset
""" # noqa: E501 # FIXME CoP
asset_level_option_keys: ClassVar[tuple[str, ...]] = (
"s3_prefix",
"s3_delimiter",
"s3_max_keys",
"s3_recursive_file_discovery",
)
asset_options_type: ClassVar[Type[_S3Options]] = _S3Options
def __init__( # noqa: PLR0913 # FIXME CoP
self,
datasource_name: str,
data_asset_name: str,
s3_client: BaseClient,
bucket: str,
prefix: str = "",
delimiter: str = "/",
max_keys: int = 1000,
recursive_file_discovery: bool = False,
file_path_template_map_fn: Optional[Callable] = None,
whole_directory_path_override: PathStr | None = None,
) -> None:
self._s3_client: BaseClient = s3_client
self._bucket: str = bucket
self._prefix: str = prefix
self._sanitized_prefix: str = sanitize_prefix_for_gcs_and_s3(text=prefix)
self._delimiter: str = delimiter
self._max_keys: int = max_keys
self._recursive_file_discovery = recursive_file_discovery
super().__init__(
datasource_name=datasource_name,
data_asset_name=data_asset_name,
file_path_template_map_fn=file_path_template_map_fn,
whole_directory_path_override=whole_directory_path_override,
)
@classmethod
def build_data_connector( # noqa: PLR0913 # FIXME CoP
cls,
datasource_name: str,
data_asset_name: str,
s3_client: BaseClient,
bucket: str,
prefix: str = "",
delimiter: str = "/",
max_keys: int = 1000,
recursive_file_discovery: bool = False,
file_path_template_map_fn: Optional[Callable] = None,
whole_directory_path_override: PathStr | None = None,
) -> S3DataConnector:
"""Builds "S3DataConnector", which links named DataAsset to AWS S3.
Args:
datasource_name: The name of the Datasource associated with this "S3DataConnector" instance
data_asset_name: The name of the DataAsset using this "S3DataConnector" instance
s3_client: S3 Client reference handle
bucket: bucket for S3
prefix: S3 prefix
delimiter: S3 delimiter
max_keys: S3 max_keys (default is 1000)
recursive_file_discovery: Flag to indicate if files should be searched recursively from subfolders
file_path_template_map_fn: Format function mapping path to fully-qualified resource on S3
whole_directory_path_override: If present, treat entire directory as single Asset
Returns:
Instantiated "S3DataConnector" object
""" # noqa: E501 # FIXME CoP
return S3DataConnector(
datasource_name=datasource_name,
data_asset_name=data_asset_name,
s3_client=s3_client,
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
max_keys=max_keys,
recursive_file_discovery=recursive_file_discovery,
file_path_template_map_fn=file_path_template_map_fn,
whole_directory_path_override=whole_directory_path_override,
)
@classmethod
def build_test_connection_error_message(
cls,
data_asset_name: str,
bucket: str,
prefix: str = "",
delimiter: str = "/",
recursive_file_discovery: bool = False,
) -> str:
"""Builds helpful error message for reporting issues when linking named DataAsset to Microsoft Azure Blob Storage.
Args:
data_asset_name: The name of the DataAsset using this "AzureBlobStorageDataConnector" instance
bucket: bucket for S3
prefix: S3 prefix
delimiter: S3 delimiter
recursive_file_discovery: Flag to indicate if files should be searched recursively from subfolders
Returns:
Customized error message
""" # noqa: E501 # FIXME CoP
test_connection_error_message_template: str = 'No file in bucket "{bucket}" with prefix "{prefix}" and recursive file discovery set to "{recursive_file_discovery}" found using delimiter "{delimiter}" for DataAsset "{data_asset_name}".' # noqa: E501 # FIXME CoP
return test_connection_error_message_template.format(
**{
"data_asset_name": data_asset_name,
"bucket": bucket,
"prefix": prefix,
"delimiter": delimiter,
"recursive_file_discovery": recursive_file_discovery,
}
)
@override
def build_batch_spec(self, batch_definition: LegacyBatchDefinition) -> S3BatchSpec:
"""
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (LegacyBatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
"""
batch_spec: PathBatchSpec = super().build_batch_spec(batch_definition=batch_definition)
return S3BatchSpec(batch_spec)
# Interface Method
@override
def get_data_references(self) -> List[str]:
query_options: dict = {
"Bucket": self._bucket,
"Prefix": self._sanitized_prefix,
"Delimiter": self._delimiter,
"MaxKeys": self._max_keys,
}
path_list: List[str] = list(
list_s3_keys(
s3=self._s3_client,
query_options=query_options,
iterator_dict={},
recursive=self._recursive_file_discovery,
)
)
return path_list
# Interface Method
@override
def _get_full_file_path(self, path: str) -> str:
    """Return a fully qualified S3 URL for the given object key.

    Keys that already carry the ``s3://`` scheme (e.g. a
    whole_directory_path_override) are returned unchanged; otherwise the
    configured template map function combines bucket and key.

    Raises:
        MissingFilePathTemplateMapFnError: if no template map function was configured.
    """
    # If the path is already a fully qualified S3 URL (starts with s3://), return it as-is
    # This handles the case of whole_directory_path_override which is already fully qualified
    if path.startswith("s3://"):
        return path
    if self._file_path_template_map_fn is None:
        raise MissingFilePathTemplateMapFnError()
    template_arguments = {
        "bucket": self._bucket,
        "path": path,
    }
    return self._file_path_template_map_fn(**template_arguments)
@override
def _preprocess_batching_regex(self, regex: re.Pattern) -> re.Pattern:
    """Anchor the user-supplied batching regex to the sanitized S3 prefix.

    S3 keys include the full prefix, so the escaped prefix must be matched
    before the user's pattern can match the remainder of the key.
    """
    regex = re.compile(f"{re.escape(self._sanitized_prefix)}{regex.pattern}")
    return super()._preprocess_batching_regex(regex=regex)
def list_s3_keys(  # noqa: C901 # too complex
    s3, query_options: dict, iterator_dict: dict, recursive: bool = False
) -> Generator[str, None, None]:
    """
    Yield S3 object keys (full prefix plus leaf file name) matching the query.

    For InferredAssetS3DataConnector, we take bucket and prefix and search for files using RegEx at and below the level
    specified by that bucket and prefix. However, for ConfiguredAssetS3DataConnector, we take bucket and prefix and
    search for files using RegEx only at the level specified by that bucket and prefix. This restriction for the
    ConfiguredAssetS3DataConnector is needed, because paths on S3 are comprised not only the leaf file name but the
    full path that includes both the prefix and the file name. Otherwise, in the situations where multiple data assets
    share levels of a directory tree, matching files to data assets will not be possible, due to the path ambiguity.

    :param s3: s3 client connection
    :param query_options: s3 query attributes ("Bucket", "Prefix", "Delimiter", "MaxKeys")
    :param iterator_dict: dictionary to manage "NextContinuationToken" (if "IsTruncated" is returned from S3)
    :param recursive: True for InferredAssetS3DataConnector and False for ConfiguredAssetS3DataConnector (see above)
    :return: string valued key representing file path on S3 (full prefix and leaf file name)
    """  # noqa: E501
    if iterator_dict is None:
        iterator_dict = {}
    # Resume a paginated listing if a previous page left us a continuation token.
    if "continuation_token" in iterator_dict:
        query_options.update({"ContinuationToken": iterator_dict["continuation_token"]})
    logger.debug(f"Fetching objects from S3 with query options: {query_options}")
    s3_objects_info: dict = s3.list_objects_v2(**query_options)
    # Drop the token so the recursive sub-prefix queries below start from page one.
    query_options.pop("ContinuationToken", None)
    if not any(key in s3_objects_info for key in ["Contents", "CommonPrefixes"]):
        raise ValueError("S3 query may not have been configured correctly.")  # noqa: TRY003
    if "Contents" in s3_objects_info:
        # Skip zero-byte objects (typically "directory" placeholder keys).
        keys: List[str] = [item["Key"] for item in s3_objects_info["Contents"] if item["Size"] > 0]
        yield from keys
    if recursive and "CommonPrefixes" in s3_objects_info:
        # Descend into each sub-"directory" grouped under the current delimiter.
        common_prefixes: List[Dict[str, Any]] = s3_objects_info["CommonPrefixes"]
        for prefix_info in common_prefixes:
            query_options_tmp: dict = copy.deepcopy(query_options)
            query_options_tmp.update({"Prefix": prefix_info["Prefix"]})
            # Recursively fetch from updated prefix (fresh pagination state).
            yield from list_s3_keys(
                s3=s3,
                query_options=query_options_tmp,
                iterator_dict={},
                recursive=recursive,
            )
    if s3_objects_info["IsTruncated"]:
        # More pages remain: stash the token and recurse to fetch the next page.
        iterator_dict["continuation_token"] = s3_objects_info["NextContinuationToken"]
        # Recursively fetch more
        yield from list_s3_keys(
            s3=s3,
            query_options=query_options,
            iterator_dict=iterator_dict,
            recursive=recursive,
        )
    if "continuation_token" in iterator_dict:
        # Make sure we clear the token once we've gotten fully through
        del iterator_dict["continuation_token"]
|
S3DataConnector
|
python
|
ray-project__ray
|
rllib/models/tf/recurrent_net.py
|
{
"start": 5139,
"end": 11569
}
|
class ____(RecurrentNetwork):
"""An LSTM wrapper serving as an interface for ModelV2s that set use_lstm."""
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: int,
model_config: ModelConfigDict,
name: str,
):
super(LSTMWrapper, self).__init__(
obs_space, action_space, None, model_config, name
)
# At this point, self.num_outputs is the number of nodes coming
# from the wrapped (underlying) model. In other words, self.num_outputs
# is the input size for the LSTM layer.
# If None, set it to the observation space.
if self.num_outputs is None:
self.num_outputs = int(np.prod(self.obs_space.shape))
self.cell_size = model_config["lstm_cell_size"]
self.use_prev_action = model_config["lstm_use_prev_action"]
self.use_prev_reward = model_config["lstm_use_prev_reward"]
self.action_space_struct = get_base_struct_from_space(self.action_space)
self.action_dim = 0
for space in tree.flatten(self.action_space_struct):
if isinstance(space, Discrete):
self.action_dim += space.n
elif isinstance(space, MultiDiscrete):
self.action_dim += np.sum(space.nvec)
elif space.shape is not None:
self.action_dim += int(np.prod(space.shape))
else:
self.action_dim += int(len(space))
# Add prev-action/reward nodes to input to LSTM.
if self.use_prev_action:
self.num_outputs += self.action_dim
if self.use_prev_reward:
self.num_outputs += 1
# Define input layers.
input_layer = tf.keras.layers.Input(
shape=(None, self.num_outputs), name="inputs"
)
# Set self.num_outputs to the number of output nodes desired by the
# caller of this constructor.
self.num_outputs = num_outputs
state_in_h = tf.keras.layers.Input(shape=(self.cell_size,), name="h")
state_in_c = tf.keras.layers.Input(shape=(self.cell_size,), name="c")
seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)
# Preprocess observation with a hidden layer and send to LSTM cell
lstm_out, state_h, state_c = tf.keras.layers.LSTM(
self.cell_size, return_sequences=True, return_state=True, name="lstm"
)(
inputs=input_layer,
mask=tf.sequence_mask(seq_in),
initial_state=[state_in_h, state_in_c],
)
# Postprocess LSTM output with another hidden layer and compute values
logits = tf.keras.layers.Dense(
self.num_outputs, activation=tf.keras.activations.linear, name="logits"
)(lstm_out)
values = tf.keras.layers.Dense(1, activation=None, name="values")(lstm_out)
# Create the RNN model
self._rnn_model = tf.keras.Model(
inputs=[input_layer, seq_in, state_in_h, state_in_c],
outputs=[logits, values, state_h, state_c],
)
# Print out model summary in INFO logging mode.
if logger.isEnabledFor(logging.INFO):
self._rnn_model.summary()
# Add prev-a/r to this model's view, if required.
if model_config["lstm_use_prev_action"]:
self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(
SampleBatch.ACTIONS, space=self.action_space, shift=-1
)
if model_config["lstm_use_prev_reward"]:
self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(
SampleBatch.REWARDS, shift=-1
)
@override(RecurrentNetwork)
def forward(
self,
input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType,
) -> Tuple[TensorType, List[TensorType]]:
assert seq_lens is not None
# Push obs through "unwrapped" net's `forward()` first.
wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
# Concat. prev-action/reward if required.
prev_a_r = []
# Prev actions.
if self.model_config["lstm_use_prev_action"]:
prev_a = input_dict[SampleBatch.PREV_ACTIONS]
# If actions are not processed yet (in their original form as
# have been sent to environment):
# Flatten/one-hot into 1D array.
if self.model_config["_disable_action_flattening"]:
prev_a_r.append(
flatten_inputs_to_1d_tensor(
prev_a,
spaces_struct=self.action_space_struct,
time_axis=False,
)
)
# If actions are already flattened (but not one-hot'd yet!),
# one-hot discrete/multi-discrete actions here.
else:
if isinstance(self.action_space, (Discrete, MultiDiscrete)):
prev_a = one_hot(prev_a, self.action_space)
prev_a_r.append(
tf.reshape(tf.cast(prev_a, tf.float32), [-1, self.action_dim])
)
# Prev rewards.
if self.model_config["lstm_use_prev_reward"]:
prev_a_r.append(
tf.reshape(
tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32), [-1, 1]
)
)
# Concat prev. actions + rewards to the "main" input.
if prev_a_r:
wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1)
# Push everything through our LSTM.
input_dict["obs_flat"] = wrapped_out
return super().forward(input_dict, state, seq_lens)
@override(RecurrentNetwork)
def forward_rnn(
self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
) -> Tuple[TensorType, List[TensorType]]:
model_out, self._value_out, h, c = self._rnn_model([inputs, seq_lens] + state)
return model_out, [h, c]
@override(ModelV2)
def get_initial_state(self) -> List[np.ndarray]:
return [
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
]
@override(ModelV2)
def value_function(self) -> TensorType:
return tf.reshape(self._value_out, [-1])
|
LSTMWrapper
|
python
|
pypa__warehouse
|
tests/common/db/oidc.py
|
{
"start": 1305,
"end": 1671
}
|
class ____(WarehouseFactory):
    """factory_boy factory producing GitLabPublisher rows for tests.

    project/namespace/id are randomized; workflow path, environment and
    issuer URL are fixed, deterministic defaults.
    """

    class Meta:
        model = GitLabPublisher

    id = factory.Faker("uuid4", cast_to=None)  # keep native UUID, not str
    project = factory.Faker("pystr", max_chars=12)
    namespace = factory.Faker("pystr", max_chars=12)
    workflow_filepath = "subfolder/example.yml"
    environment = "production"
    issuer_url = "https://gitlab.com"
|
GitLabPublisherFactory
|
python
|
pytorch__pytorch
|
tools/test/test_docstring_linter.py
|
{
"start": 856,
"end": 6139
}
|
class ____(LinterTestCase):
LinterClass = DocstringLinter
maxDiff = 10_240
def test_python_code(self):
self.lint_test(TEST_FILE, ARGS)
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_end_to_end(self, mock_stdout):
argv_base = *ARGS, str(TEST_FILE), str(TEST_FILE2)
report = "--report"
write = "--write-grandfather"
out = _next_stdout(mock_stdout)
def run(name, *argv):
DocstringLinter(argv_base + argv).lint_all()
self.assertExpected(TEST_FILE2, next(out), name)
with tempfile.TemporaryDirectory() as td:
grandfather_file = f"{td}/grandfather.json"
grandfather = f"--grandfather={grandfather_file}"
# Find some failures
run("before.txt", grandfather)
# Rewrite grandfather file
run("before.json", grandfather, report, write)
actual = Path(grandfather_file).read_text()
self.assertExpected(TEST_FILE2, actual, "grandfather.json")
# Now there are no failures
run("after.txt", grandfather)
run("after.json", grandfather, report)
def test_report(self):
actual = _dumps(_data())
self.assertExpected(TEST_FILE, actual, "report.json")
def test_terse(self):
terse = make_terse(_data(), index_by_line=False)
actual = _dumps(terse)
self.assertExpected(TEST_FILE, actual, "terse.json")
def test_terse_line(self):
terse = make_terse(_data(), index_by_line=True)
actual = _dumps(terse)
self.assertExpected(TEST_FILE, actual, "terse.line.json")
def test_recursive(self):
recursive = make_recursive(_data())
actual = _dumps(recursive)
self.assertExpected(TEST_FILE, actual, "recursive.json")
def test_terse_recursive(self):
recursive = make_recursive(_data())
terse = make_terse(recursive, index_by_line=False)
actual = _dumps(terse)
self.assertExpected(TEST_FILE, actual, "recursive.terse.json")
def test_terse_line_recursive(self):
recursive = make_recursive(_data())
terse = make_terse(recursive, index_by_line=True)
actual = _dumps(terse)
self.assertExpected(TEST_FILE, actual, "recursive.terse.line.json")
def test_file_summary(self):
actual = _dumps(file_summary(_data(), report_all=True))
self.assertExpected(TEST_FILE, actual, "single.line.json")
def test_file_names(self):
f = DocstringLinter.make_file(TEST_BLOCK_NAMES)
actual = [b.full_name for b in f.blocks]
expected = [
"top",
"top.fun[1]",
"top.fun[1].sab",
"top.fun[1].sub",
"top.fun[2]",
"top.fun[2].sub[1]",
"top.fun[2].sub[2]",
"top.fun[3]",
"top.fun[3].sub",
"top.fun[3].sab",
"top.run",
"top.run.sub[1]",
"top.run.sub[2]",
]
self.assertEqual(actual, expected)
def test_decorators(self):
tests = itertools.product(INDENTS, DECORATORS.items())
for indent, (name, (expected, test_inputs)) in tests:
ind = indent * " "
for data in test_inputs:
prog = "".join(ind + d + "\n" for d in data)
pf = DocstringLinter.make_file(prog)
it = (i for i, t in enumerate(pf.tokens) if t.string == "def")
def_t = next(it, 0)
with self.subTest("Decorator", indent=indent, name=name, data=data):
actual = list(_get_decorators(pf.tokens, def_t))
self.assertEqual(actual, expected)
def _dumps(d: dict) -> str:
return json.dumps(d, sort_keys=True, indent=2) + "\n"
def _data(file=TEST_FILE):
docstring_file = DocstringLinter.make_file(file)
return [b.as_data() for b in docstring_file.blocks]
def _next_stdout(mock_stdout):
length = 0
while True:
s = mock_stdout.getvalue()
yield s[length:]
length = len(s)
CONSTANT = "A = 10"
COMMENT = "# a simple function"
OVER = "@override"
WRAPS = "@functools.wraps(fn)"
MASSIVE = (
"@some.long.path.very_long_function_name(",
" adjust_something_fiddly=1231232,",
" disable_something_critical=True,)",
)
MASSIVE_FLAT = (
"@some.long.path.very_long_function_name("
"adjust_something_fiddly=1231232,"
"disable_something_critical=True,)"
)
DEF = "def function():", " pass"
INDENTS = 0, 4, 8
DECORATORS = {
"none": (
[],
(
[],
[*DEF],
[COMMENT, *DEF],
[CONSTANT, "", COMMENT, *DEF],
[OVER, CONSTANT, *DEF], # Probably not even Python. :-)
),
),
"one": (
[OVER],
(
[OVER, *DEF],
[OVER, COMMENT, *DEF],
[OVER, COMMENT, "", *DEF],
[COMMENT, OVER, "", COMMENT, "", *DEF],
),
),
"two": (
[OVER, WRAPS],
(
[OVER, WRAPS, *DEF],
[COMMENT, OVER, COMMENT, WRAPS, COMMENT, *DEF],
),
),
"massive": (
[MASSIVE_FLAT, OVER],
([*MASSIVE, OVER, *DEF],),
),
}
|
TestDocstringLinter
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/exc.py
|
{
"start": 10580,
"end": 10886
}
|
class ____(InvalidRequestError):
    """A database result was required but none was found.

    .. versionchanged:: 1.4 This exception is now part of the
       ``sqlalchemy.exc`` module in Core, moved from the ORM. The symbol
       remains importable from ``sqlalchemy.orm.exc``.
    """
|
NoResultFound
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/interpreter.py
|
{
"start": 12011,
"end": 13773
}
|
class ____(enum.Enum):
    """Different types of op resolvers for Tensorflow Lite.

    * `AUTO`: Indicates the op resolver that is chosen by default in TfLite
      Python, which is the "BUILTIN" as described below.
    * `BUILTIN`: Indicates the op resolver for built-in ops with optimized kernel
      implementation.
    * `BUILTIN_REF`: Indicates the op resolver for built-in ops with reference
      kernel implementation. It's generally used for testing and debugging.
    * `BUILTIN_WITHOUT_DEFAULT_DELEGATES`: Indicates the op resolver for
      built-in ops with optimized kernel implementation, but it will disable
      the application of default TfLite delegates (like the XNNPACK delegate) to
      the model graph. Generally this should not be used unless there are issues
      with the default configuration.
    """

    # Member values are symbolic; the C++-facing ids are mapped separately in
    # _get_op_resolver_id below.
    # Corresponds to an op resolver chosen by default in TfLite Python.
    AUTO = 0
    # Corresponds to tflite::ops::builtin::BuiltinOpResolver in C++.
    BUILTIN = 1
    # Corresponds to tflite::ops::builtin::BuiltinRefOpResolver in C++.
    BUILTIN_REF = 2
    # Corresponds to
    # tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates in C++.
    BUILTIN_WITHOUT_DEFAULT_DELEGATES = 3
def _get_op_resolver_id(op_resolver_type=OpResolverType.AUTO):
    """Return the integer identifier for the given op resolver type.

    Args:
      op_resolver_type: an OpResolverType member (defaults to AUTO).

    Returns:
      The integer id understood by the C++ interpreter wrapper, or None for
      unrecognized values so callers can detect unsupported resolver types.
    """
    # Note: the integer identifier value needs to be same w/ op resolver ids
    # defined in interpreter_wrapper/interpreter_wrapper.cc.
    return {
        # Note AUTO and BUILTIN currently share the same identifier.
        OpResolverType.AUTO: 1,
        OpResolverType.BUILTIN: 1,
        OpResolverType.BUILTIN_REF: 2,
        OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES: 3,
    }.get(op_resolver_type)  # dict.get already defaults to None
@_tf_export('lite.Interpreter')
|
OpResolverType
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_deployment_rank_manager.py
|
{
"start": 370,
"end": 871
}
|
class ____:
    """Mock replica for testing without heavy dependencies."""

    def __init__(
        self,
        replica_id: str,
        deployment_name: str = "test_deployment",
        app_name: str = "test_app",
    ):
        # Wrap the raw id in a full ReplicaID so code under test can treat
        # this object like a real deployment replica handle.
        self.replica_id = ReplicaID(
            unique_id=replica_id,
            deployment_id=DeploymentID(name=deployment_name, app_name=app_name),
        )

    def __str__(self):
        # Human-readable form used in test failure output.
        return f"MockDeploymentReplica(replica_id={self.replica_id})"
|
MockDeploymentReplica
|
python
|
doocs__leetcode
|
solution/0500-0599/0540.Single Element in a Sorted Array/Solution.py
|
{
"start": 0,
"end": 294
}
|
class ____:
def singleNonDuplicate(self, nums: List[int]) -> int:
l, r = 0, len(nums) - 1
while l < r:
mid = (l + r) >> 1
if nums[mid] != nums[mid ^ 1]:
r = mid
else:
l = mid + 1
return nums[l]
|
Solution
|
python
|
ray-project__ray
|
python/ray/llm/_internal/common/callbacks/base.py
|
{
"start": 4620,
"end": 5094
}
|
class ____:
    """Configuration for the callback to be used in LLMConfig"""

    # NOTE(review): use of field(default_factory=...) implies this class is
    # decorated as a dataclass where it is defined — confirm in the module.
    callback_class: Union[str, Type[CallbackBase]] = CallbackBase
    """Class to use for the callback. Can be custom user defined class"""

    callback_kwargs: Dict[str, Any] = field(default_factory=dict)
    """Keyword arguments to pass to the Callback class at construction."""

    raise_error_on_callback: bool = True
    """Whether to raise an error if a callback method fails."""
|
CallbackConfig
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/rpc/sig.py
|
{
"start": 263,
"end": 482
}
|
class ____(Exception):
    """Exception tied to a SerializableFunctionSignature.

    The message is prefixed with the signature's dotted name (via
    ``generate_name('.')``) so errors identify which function they concern.
    """

    def __init__(self, signature: SerializableFunctionSignature, message: str) -> None:
        super().__init__(f"{signature.generate_name('.')}: {message}")
|
_SerializableFunctionSignatureException
|
python
|
walkccc__LeetCode
|
solutions/771. Jewels and Stones/771.py
|
{
"start": 0,
"end": 163
}
|
class ____:
def numJewelsInStones(self, jewels: str, stones: str) -> int:
jewelsSet = set(jewels)
return sum(stone in jewelsSet for stone in stones)
|
Solution
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 63035,
"end": 65126
}
|
class ____(BaseModel):
    """
    Task serializer for responses.
    """

    # NOTE(review): this model lives in a *generated.py* module and appears to
    # be auto-generated from an API schema — manual edits may be overwritten.
    task_id: Annotated[str | None, Field(title="Task Id")] = None
    task_display_name: Annotated[str | None, Field(title="Task Display Name")] = None
    owner: Annotated[str | None, Field(title="Owner")] = None
    start_date: Annotated[datetime | None, Field(title="Start Date")] = None
    end_date: Annotated[datetime | None, Field(title="End Date")] = None
    trigger_rule: Annotated[str | None, Field(title="Trigger Rule")] = None
    depends_on_past: Annotated[bool, Field(title="Depends On Past")]
    wait_for_downstream: Annotated[bool, Field(title="Wait For Downstream")]
    retries: Annotated[float | None, Field(title="Retries")] = None
    queue: Annotated[str | None, Field(title="Queue")] = None
    pool: Annotated[str | None, Field(title="Pool")] = None
    pool_slots: Annotated[float | None, Field(title="Pool Slots")] = None
    execution_timeout: TimeDelta | None = None
    retry_delay: TimeDelta | None = None
    retry_exponential_backoff: Annotated[float, Field(title="Retry Exponential Backoff")]
    priority_weight: Annotated[float | None, Field(title="Priority Weight")] = None
    weight_rule: Annotated[str | None, Field(title="Weight Rule")] = None
    ui_color: Annotated[str | None, Field(title="Ui Color")] = None
    ui_fgcolor: Annotated[str | None, Field(title="Ui Fgcolor")] = None
    template_fields: Annotated[list[str] | None, Field(title="Template Fields")] = None
    downstream_task_ids: Annotated[list[str] | None, Field(title="Downstream Task Ids")] = None
    doc_md: Annotated[str | None, Field(title="Doc Md")] = None
    operator_name: Annotated[str | None, Field(title="Operator Name")] = None
    params: Annotated[dict[str, Any] | None, Field(title="Params")] = None
    class_ref: Annotated[dict[str, Any] | None, Field(title="Class Ref")] = None
    is_mapped: Annotated[bool | None, Field(title="Is Mapped")] = None
    extra_links: Annotated[
        list[str], Field(description="Extract and return extra_links.", title="Extra Links")
    ]
|
TaskResponse
|
python
|
networkx__networkx
|
networkx/tests/test_relabel.py
|
{
"start": 143,
"end": 14554
}
|
class ____:
def test_convert_node_labels_to_integers(self):
# test that empty graph converts fine for all options
G = empty_graph()
H = nx.convert_node_labels_to_integers(G, 100)
assert list(H.nodes()) == []
assert list(H.edges()) == []
for opt in ["default", "sorted", "increasing degree", "decreasing degree"]:
G = empty_graph()
H = nx.convert_node_labels_to_integers(G, 100, ordering=opt)
assert list(H.nodes()) == []
assert list(H.edges()) == []
G = empty_graph()
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
H = nx.convert_node_labels_to_integers(G)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
H = nx.convert_node_labels_to_integers(G, 1000)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
assert nodes_equal(H.nodes(), [1000, 1001, 1002, 1003])
H = nx.convert_node_labels_to_integers(G, ordering="increasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
assert H.degree(0) == 1
assert H.degree(1) == 2
assert H.degree(2) == 2
assert H.degree(3) == 3
H = nx.convert_node_labels_to_integers(G, ordering="decreasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
assert H.degree(0) == 3
assert H.degree(1) == 2
assert H.degree(2) == 2
assert H.degree(3) == 1
H = nx.convert_node_labels_to_integers(
G, ordering="increasing degree", label_attribute="label"
)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
assert H.degree(0) == 1
assert H.degree(1) == 2
assert H.degree(2) == 2
assert H.degree(3) == 3
# check mapping
assert H.nodes[3]["label"] == "C"
assert H.nodes[0]["label"] == "D"
assert H.nodes[1]["label"] == "A" or H.nodes[2]["label"] == "A"
assert H.nodes[1]["label"] == "B" or H.nodes[2]["label"] == "B"
def test_convert_to_integers2(self):
G = empty_graph()
G.add_edges_from([("C", "D"), ("A", "B"), ("A", "C"), ("B", "C")])
H = nx.convert_node_labels_to_integers(G, ordering="sorted")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert sorted(degH) == sorted(degG)
H = nx.convert_node_labels_to_integers(
G, ordering="sorted", label_attribute="label"
)
assert H.nodes[0]["label"] == "A"
assert H.nodes[1]["label"] == "B"
assert H.nodes[2]["label"] == "C"
assert H.nodes[3]["label"] == "D"
def test_convert_to_integers_raise(self):
with pytest.raises(nx.NetworkXError):
G = nx.Graph()
H = nx.convert_node_labels_to_integers(G, ordering="increasing age")
def test_relabel_nodes_copy(self):
G = nx.empty_graph()
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"}
H = nx.relabel_nodes(G, mapping)
assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"])
def test_relabel_nodes_function(self):
G = nx.empty_graph()
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
# function mapping no longer encouraged but works
def mapping(n):
return ord(n)
H = nx.relabel_nodes(G, mapping)
assert nodes_equal(H.nodes(), [65, 66, 67, 68])
def test_relabel_nodes_callable_type(self):
G = nx.path_graph(4)
H = nx.relabel_nodes(G, str)
assert nodes_equal(H.nodes, ["0", "1", "2", "3"])
@pytest.mark.parametrize("non_mc", ("0123", ["0", "1", "2", "3"]))
def test_relabel_nodes_non_mapping_or_callable(self, non_mc):
"""If `mapping` is neither a Callable or a Mapping, an exception
should be raised."""
G = nx.path_graph(4)
with pytest.raises(AttributeError):
nx.relabel_nodes(G, non_mc)
def test_relabel_nodes_graph(self):
G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"}
H = nx.relabel_nodes(G, mapping)
assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"])
def test_relabel_nodes_orderedgraph(self):
G = nx.Graph()
G.add_nodes_from([1, 2, 3])
G.add_edges_from([(1, 3), (2, 3)])
mapping = {1: "a", 2: "b", 3: "c"}
H = nx.relabel_nodes(G, mapping)
assert list(H.nodes) == ["a", "b", "c"]
def test_relabel_nodes_digraph(self):
G = nx.DiGraph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"}
H = nx.relabel_nodes(G, mapping, copy=False)
assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"])
def test_relabel_nodes_multigraph(self):
G = nx.MultiGraph([("a", "b"), ("a", "b")])
mapping = {"a": "aardvark", "b": "bear"}
G = nx.relabel_nodes(G, mapping, copy=False)
assert nodes_equal(G.nodes(), ["aardvark", "bear"])
assert edges_equal(G.edges(), [("aardvark", "bear"), ("aardvark", "bear")])
def test_relabel_nodes_multidigraph(self):
G = nx.MultiDiGraph([("a", "b"), ("a", "b")])
mapping = {"a": "aardvark", "b": "bear"}
G = nx.relabel_nodes(G, mapping, copy=False)
assert nodes_equal(G.nodes(), ["aardvark", "bear"])
assert edges_equal(
G.edges(), [("aardvark", "bear"), ("aardvark", "bear")], directed=True
)
def test_relabel_isolated_nodes_to_same(self):
G = nx.Graph()
G.add_nodes_from(range(4))
mapping = {1: 1}
H = nx.relabel_nodes(G, mapping, copy=False)
assert nodes_equal(H.nodes(), list(range(4)))
def test_relabel_nodes_missing(self):
G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
mapping = {0: "aardvark"}
# copy=True
H = nx.relabel_nodes(G, mapping, copy=True)
assert nodes_equal(H.nodes, G.nodes)
# copy=False
GG = G.copy()
nx.relabel_nodes(G, mapping, copy=False)
assert nodes_equal(G.nodes, GG.nodes)
def test_relabel_copy_name(self):
G = nx.Graph()
H = nx.relabel_nodes(G, {}, copy=True)
assert H.graph == G.graph
H = nx.relabel_nodes(G, {}, copy=False)
assert H.graph == G.graph
G.name = "first"
H = nx.relabel_nodes(G, {}, copy=True)
assert H.graph == G.graph
H = nx.relabel_nodes(G, {}, copy=False)
assert H.graph == G.graph
def test_relabel_toposort(self):
K5 = nx.complete_graph(4)
G = nx.complete_graph(4)
G = nx.relabel_nodes(G, {i: i + 1 for i in range(4)}, copy=False)
assert nx.is_isomorphic(K5, G)
G = nx.complete_graph(4)
G = nx.relabel_nodes(G, {i: i - 1 for i in range(4)}, copy=False)
assert nx.is_isomorphic(K5, G)
def test_relabel_selfloop(self):
G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False)
assert nodes_equal(G.nodes(), ["One", "Three", "Two"])
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False)
assert nodes_equal(G.nodes(), ["One", "Three", "Two"])
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert nodes_equal(G.nodes(), [0])
def test_relabel_multidigraph_inout_merge_nodes(self):
for MG in (nx.MultiGraph, nx.MultiDiGraph):
for cc in (True, False):
G = MG([(0, 4), (1, 4), (4, 2), (4, 3)])
G[0][4][0]["value"] = "a"
G[1][4][0]["value"] = "b"
G[4][2][0]["value"] = "c"
G[4][3][0]["value"] = "d"
G.add_edge(0, 4, key="x", value="e")
G.add_edge(4, 3, key="x", value="f")
mapping = {0: 9, 1: 9, 2: 9, 3: 9}
H = nx.relabel_nodes(G, mapping, copy=cc)
# No ordering on keys enforced
assert {"value": "a"} in H[9][4].values()
assert {"value": "b"} in H[9][4].values()
assert {"value": "c"} in H[4][9].values()
assert len(H[4][9]) == 3 if G.is_directed() else 6
assert {"value": "d"} in H[4][9].values()
assert {"value": "e"} in H[9][4].values()
assert {"value": "f"} in H[4][9].values()
assert len(H[9][4]) == 3 if G.is_directed() else 6
def test_relabel_multigraph_merge_inplace(self):
G = nx.MultiGraph([(0, 1), (0, 2), (0, 3), (0, 1), (0, 2), (0, 3)])
G[0][1][0]["value"] = "a"
G[0][2][0]["value"] = "b"
G[0][3][0]["value"] = "c"
mapping = {1: 4, 2: 4, 3: 4}
nx.relabel_nodes(G, mapping, copy=False)
# No ordering on keys enforced
assert {"value": "a"} in G[0][4].values()
assert {"value": "b"} in G[0][4].values()
assert {"value": "c"} in G[0][4].values()
def test_relabel_multidigraph_merge_inplace(self):
G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)])
G[0][1][0]["value"] = "a"
G[0][2][0]["value"] = "b"
G[0][3][0]["value"] = "c"
mapping = {1: 4, 2: 4, 3: 4}
nx.relabel_nodes(G, mapping, copy=False)
# No ordering on keys enforced
assert {"value": "a"} in G[0][4].values()
assert {"value": "b"} in G[0][4].values()
assert {"value": "c"} in G[0][4].values()
def test_relabel_multidigraph_inout_copy(self):
G = nx.MultiDiGraph([(0, 4), (1, 4), (4, 2), (4, 3)])
G[0][4][0]["value"] = "a"
G[1][4][0]["value"] = "b"
G[4][2][0]["value"] = "c"
G[4][3][0]["value"] = "d"
G.add_edge(0, 4, key="x", value="e")
G.add_edge(4, 3, key="x", value="f")
mapping = {0: 9, 1: 9, 2: 9, 3: 9}
H = nx.relabel_nodes(G, mapping, copy=True)
# No ordering on keys enforced
assert {"value": "a"} in H[9][4].values()
assert {"value": "b"} in H[9][4].values()
assert {"value": "c"} in H[4][9].values()
assert len(H[4][9]) == 3
assert {"value": "d"} in H[4][9].values()
assert {"value": "e"} in H[9][4].values()
assert {"value": "f"} in H[4][9].values()
assert len(H[9][4]) == 3
def test_relabel_multigraph_merge_copy(self):
G = nx.MultiGraph([(0, 1), (0, 2), (0, 3)])
G[0][1][0]["value"] = "a"
G[0][2][0]["value"] = "b"
G[0][3][0]["value"] = "c"
mapping = {1: 4, 2: 4, 3: 4}
H = nx.relabel_nodes(G, mapping, copy=True)
assert {"value": "a"} in H[0][4].values()
assert {"value": "b"} in H[0][4].values()
assert {"value": "c"} in H[0][4].values()
def test_relabel_multidigraph_merge_copy(self):
G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)])
G[0][1][0]["value"] = "a"
G[0][2][0]["value"] = "b"
G[0][3][0]["value"] = "c"
mapping = {1: 4, 2: 4, 3: 4}
H = nx.relabel_nodes(G, mapping, copy=True)
assert {"value": "a"} in H[0][4].values()
assert {"value": "b"} in H[0][4].values()
assert {"value": "c"} in H[0][4].values()
def test_relabel_multigraph_nonnumeric_key(self):
for MG in (nx.MultiGraph, nx.MultiDiGraph):
for cc in (True, False):
G = nx.MultiGraph()
G.add_edge(0, 1, key="I", value="a")
G.add_edge(0, 2, key="II", value="b")
G.add_edge(0, 3, key="II", value="c")
mapping = {1: 4, 2: 4, 3: 4}
nx.relabel_nodes(G, mapping, copy=False)
assert {"value": "a"} in G[0][4].values()
assert {"value": "b"} in G[0][4].values()
assert {"value": "c"} in G[0][4].values()
assert 0 in G[0][4]
assert "I" in G[0][4]
assert "II" in G[0][4]
def test_relabel_circular(self):
G = nx.path_graph(3)
mapping = {0: 1, 1: 0}
H = nx.relabel_nodes(G, mapping, copy=True)
with pytest.raises(nx.NetworkXUnfeasible):
H = nx.relabel_nodes(G, mapping, copy=False)
def test_relabel_preserve_node_order_full_mapping_with_copy_true(self):
G = nx.path_graph(3)
original_order = list(G.nodes())
mapping = {2: "a", 1: "b", 0: "c"} # dictionary keys out of order on purpose
H = nx.relabel_nodes(G, mapping, copy=True)
new_order = list(H.nodes())
assert [mapping.get(i, i) for i in original_order] == new_order
def test_relabel_preserve_node_order_full_mapping_with_copy_false(self):
G = nx.path_graph(3)
original_order = list(G)
mapping = {2: "a", 1: "b", 0: "c"} # dictionary keys out of order on purpose
H = nx.relabel_nodes(G, mapping, copy=False)
new_order = list(H)
assert [mapping.get(i, i) for i in original_order] == new_order
def test_relabel_preserve_node_order_partial_mapping_with_copy_true(self):
G = nx.path_graph(3)
original_order = list(G)
mapping = {1: "a", 0: "b"} # partial mapping and keys out of order on purpose
H = nx.relabel_nodes(G, mapping, copy=True)
new_order = list(H)
assert [mapping.get(i, i) for i in original_order] == new_order
def test_relabel_preserve_node_order_partial_mapping_with_copy_false(self):
G = nx.path_graph(3)
original_order = list(G)
mapping = {1: "a", 0: "b"} # partial mapping and keys out of order on purpose
H = nx.relabel_nodes(G, mapping, copy=False)
new_order = list(H)
assert [mapping.get(i, i) for i in original_order] != new_order
|
TestRelabel
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/metrics/data_profiler_metrics/data_profiler_profile_numeric_columns.py
|
{
"start": 546,
"end": 2357
}
|
class ____(DataProfilerProfileMetricProvider):
    """Metric that returns the names of numeric (int/float) columns found in a
    DataProfiler profile report."""

    metric_name = "data_profiler.profile_numeric_columns"
    value_keys = ("profile_path",)

    @metric_value(engine=PandasExecutionEngine)
    def _pandas(
        cls,
        execution_engine,
        metric_domain_kwargs,
        metric_value_kwargs,
        metrics,
        runtime_configuration,
    ):
        # The profile report is supplied by the dependency metric registered
        # in _get_evaluation_dependencies below.
        profile_report = metrics["data_profiler.profile_report"]
        numeric_columns = []
        for col in profile_report["data_stats"]:
            dtype = col["data_type"]
            if dtype == "int" or dtype == "float":
                numeric_columns.append(col["column_name"])
        return numeric_columns

    @classmethod
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """
        Returns a dictionary of given metric names and their corresponding configuration, specifying
        the metric types and their respective domains"""
        dependencies: dict = super()._get_evaluation_dependencies(
            metric=metric,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )
        # This metric needs the raw profile report computed for the same
        # domain/value kwargs.
        if metric.metric_name == "data_profiler.profile_numeric_columns":
            dependencies["data_profiler.profile_report"] = MetricConfiguration(
                metric_name="data_profiler.profile_report",
                metric_domain_kwargs=metric.metric_domain_kwargs,
                metric_value_kwargs=metric.metric_value_kwargs,
            )
        return dependencies
|
DataProfilerProfileNumericColumns
|
python
|
getsentry__responses
|
responses/__init__.py
|
{
"start": 3202,
"end": 6888
}
|
class ____:
"""Class to mock up built-in False boolean.
Used for backwards compatibility, see
https://github.com/getsentry/responses/issues/464
"""
def __bool__(self) -> bool:
return False
def urlencoded_params_matcher(params: Optional[Dict[str, str]]) -> Callable[..., Any]:
warn(
"Function is deprecated. Use 'from responses.matchers import urlencoded_params_matcher'",
DeprecationWarning,
)
return _urlencoded_params_matcher(params)
def json_params_matcher(params: Optional[Dict[str, Any]]) -> Callable[..., Any]:
warn(
"Function is deprecated. Use 'from responses.matchers import json_params_matcher'",
DeprecationWarning,
)
return _json_params_matcher(params)
def _has_unicode(s: str) -> bool:
return any(ord(char) > 128 for char in s)
def _clean_unicode(url: str) -> str:
"""Clean up URLs, which use punycode to handle unicode chars.
Applies percent encoding to URL path and query if required.
Parameters
----------
url : str
URL that should be cleaned from unicode
Returns
-------
str
Cleaned URL
"""
urllist = list(urlsplit(url))
netloc = urllist[1]
if _has_unicode(netloc):
domains = netloc.split(".")
for i, d in enumerate(domains):
if _has_unicode(d):
d = "xn--" + d.encode("punycode").decode("ascii")
domains[i] = d
urllist[1] = ".".join(domains)
url = urlunsplit(urllist)
# Clean up path/query/params, which use url-encoding to handle unicode chars
chars = list(url)
for i, x in enumerate(chars):
if ord(x) > 128:
chars[i] = quote(x)
return "".join(chars)
def get_wrapped(
func: Callable[..., Any],
responses: "RequestsMock",
*,
registry: Optional[Any] = None,
assert_all_requests_are_fired: Optional[bool] = None,
) -> Callable[..., Any]:
"""Wrap provided function inside ``responses`` context manager.
Provides a synchronous or asynchronous wrapper for the function.
Parameters
----------
func : Callable
Function to wrap.
responses : RequestsMock
Mock object that is used as context manager.
registry : FirstMatchRegistry, optional
Custom registry that should be applied. See ``responses.registries``
assert_all_requests_are_fired : bool
Raise an error if not all registered responses were executed.
Returns
-------
Callable
Wrapped function
"""
assert_mock = std_mock.patch.object(
target=responses,
attribute="assert_all_requests_are_fired",
new=assert_all_requests_are_fired,
)
if inspect.iscoroutinefunction(func):
# set asynchronous wrapper if requestor function is asynchronous
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any: # type: ignore[misc]
if registry is not None:
responses._set_registry(registry)
with assert_mock, responses:
return await func(*args, **kwargs)
else:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any: # type: ignore[misc]
if registry is not None:
responses._set_registry(registry)
with assert_mock, responses:
# set 'assert_all_requests_are_fired' temporarily for a single run.
# Mock automatically unsets to avoid leakage to another decorated
# function since we still apply the value on 'responses.mock' object
return func(*args, **kwargs)
return wrapper
|
FalseBool
|
python
|
PyCQA__pylint
|
pylint/checkers/base_checker.py
|
{
"start": 8594,
"end": 8887
}
|
class ____(BaseChecker):
"""Base class for checkers that want to have access to the token stream."""
@abc.abstractmethod
def process_tokens(self, tokens: list[TokenInfo]) -> None:
"""Should be overridden by subclasses."""
raise NotImplementedError()
|
BaseTokenChecker
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/writeonly.py
|
{
"start": 15912,
"end": 16352
}
|
class ____:
"""simplified CollectionAdapter for internal API consistency"""
data: Collection[Any]
def __init__(self, data: Collection[Any]):
self.data = data
def __iter__(self) -> Iterator[Any]:
return iter(self.data)
def _reset_empty(self) -> None:
pass
def __len__(self) -> int:
return len(self.data)
def __bool__(self) -> bool:
return True
|
_DynamicCollectionAdapter
|
python
|
PyCQA__pylint
|
tests/functional/s/super/super_checks.py
|
{
"start": 1247,
"end": 1292
}
|
class ____:
"""Just an empty class."""
|
Empty
|
python
|
langchain-ai__langchain
|
libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py
|
{
"start": 653,
"end": 1699
}
|
class ____(AsyncDocumentIndexTestSuite):
# Something funky is going on with mypy and async pytest fixture
@pytest.fixture
@override
async def index(self) -> AsyncGenerator[DocumentIndex, None]:
yield InMemoryDocumentIndex() # noqa: PT022
def test_sync_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
index.upsert(documents)
assert index.invoke("hello") == [documents[0], documents[1]]
assert index.invoke("cat") == [documents[1], documents[0]]
async def test_async_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
await index.aupsert(documents)
assert (await index.ainvoke("hello")) == [documents[0], documents[1]]
assert (await index.ainvoke("cat")) == [documents[1], documents[0]]
|
TestAsyncDocumentIndexerTestSuite
|
python
|
apache__thrift
|
test/py.twisted/test_suite.py
|
{
"start": 1473,
"end": 2705
}
|
class ____:
def __init__(self):
self.onewaysQueue = defer.DeferredQueue()
def testVoid(self):
pass
def testString(self, s):
return s
def testByte(self, b):
return b
def testI16(self, i16):
return i16
def testI32(self, i32):
return i32
def testI64(self, i64):
return i64
def testDouble(self, dub):
return dub
def testBinary(self, thing):
return thing
def testStruct(self, thing):
return thing
def testException(self, s):
if s == 'Xception':
raise Xception(1001, s)
elif s == "throw_undeclared":
raise ValueError("foo")
def testOneway(self, seconds):
def fireOneway(t):
self.onewaysQueue.put((t, time.time(), seconds))
reactor.callLater(seconds, fireOneway, time.time())
raise Exception('')
def testNest(self, thing):
return thing
def testMap(self, thing):
return thing
def testSet(self, thing):
return thing
def testList(self, thing):
return thing
def testEnum(self, thing):
return thing
def testTypedef(self, thing):
return thing
|
TestHandler
|
python
|
apache__airflow
|
airflow-core/src/airflow/traces/tracer.py
|
{
"start": 4816,
"end": 6456
}
|
class ____:
"""If no Tracer is configured, EmptyTracer is used as a fallback."""
@classmethod
def get_tracer(
cls,
component: str,
trace_id: int | None = None,
span_id: int | None = None,
):
"""Get a tracer using provided node id and trace id."""
return cls
@classmethod
def start_span(
cls,
span_name: str,
component: str | None = None,
parent_sc=None,
span_id=None,
links=None,
start_time=None,
) -> EmptySpan:
"""Start a span."""
return EMPTY_SPAN
@classmethod
def use_span(cls, span) -> EmptySpan:
"""Use a span as current."""
return EMPTY_SPAN
@classmethod
def get_current_span(self) -> EmptySpan:
"""Get the current span."""
return EMPTY_SPAN
@classmethod
def start_root_span(
cls, span_name=None, component=None, start_time=None, start_as_current=True
) -> EmptySpan:
"""Start a root span."""
return EMPTY_SPAN
@classmethod
def start_child_span(
cls,
span_name=None,
parent_context=None,
component=None,
links=None,
start_time=None,
start_as_current=True,
) -> EmptySpan:
"""Start a child span."""
return EMPTY_SPAN
@classmethod
def inject(cls):
"""Inject the current span context into a carrier and return it."""
return {}
@classmethod
def extract(cls, carrier) -> EmptyContext:
"""Extract the span context from a provided carrier."""
return EMPTY_CTX
|
EmptyTrace
|
python
|
astropy__astropy
|
astropy/io/fits/header.py
|
{
"start": 1502,
"end": 67693
}
|
class ____:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
Notes
-----
Although FITS keywords must be exclusively upper case, retrieving an item
in a `Header` object is case insensitive.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : list of `Card`, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return self.__class__(
[copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]
)
elif isinstance(key, str):
key = key.strip()
if key.upper() in _commentary_keywords:
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if len(value) > 2:
raise ValueError(
"A Header item may be set with either a scalar value, "
"a 1-tuple containing a scalar value, or a 2-tuple "
"containing a scalar value and comment string."
)
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ""
else:
comment = None
card = None
if isinstance(key, numbers.Integral):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
# [the solution is not too complicated--it would be wait 'til all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
# if keyword is not present raise KeyError.
# To delete keyword without caring if they were present,
# Header.remove(Keyword) can be used with optional argument ignore_missing as True
raise KeyError(f"Keyword '{key}' not found.")
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep="\n", endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
modified directly without the header containing it otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__["_modified"] = True
return self.__dict__["_modified"]
@_modified.setter
def _modified(self, val):
self.__dict__["_modified"] = val
@classmethod
def fromstring(cls, data, sep=""):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
`Header`
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b"CONTINUE"
END = b"END"
end_card = END_CARD.encode("ascii")
sep = sep.encode("latin1")
empty = b""
else:
CONTINUE = "CONTINUE"
END = "END"
end_card = END_CARD
empty = ""
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep="", endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
`Header`
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
fileobj = os.path.expanduser(fileobj)
if sep:
fileobj = open(fileobj, encoding="latin1")
else:
fileobj = open(fileobj, "rb")
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
header_str = "".join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof, check_block_size=padding)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError("Header missing END card.")
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
This method can also returned a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group("invalid"):
offset = mo.start()
trailing = block[offset + 3 : offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip("ub")
# TODO: Pass this warning up to the validation framework
warnings.warn(
f"Unexpected bytes trailing END keyword: {trailing}; these "
"bytes will be replaced with spaces on write.",
AstropyUserWarning,
)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Missing padding to end of the FITS block after the "
"END keyword; additional spaces will be appended to "
f"the file upon writing to pad out to {BLOCK_SIZE} bytes.",
AstropyUserWarning,
)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (
block[:offset]
+ encode_ascii(END_CARD)
+ block[offset + len(END_CARD) :]
)
return True, block
return False, block
def tostring(self, sep="", endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
str
A string representing a FITS header.
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[: Card.length])
s = s[Card.length :]
s = sep.join(lines)
if endcard:
s += sep + _pad("END")
if padding:
s += " " * _pad_length(len(s))
return s
def tofile(self, fileobj, sep="", endcard=True, padding=True, overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : path-like or file-like, optional
Either the pathname of a file, or an open file handle or file-like
object.
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode="ostream", overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
f"Header size ({len(blocks) - actual_block_size + BLOCK_SIZE}) "
f"is not a multiple of block size ({BLOCK_SIZE})."
)
fileobj.flush()
fileobj.write(blocks.encode("ascii"))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep="\n", endcard=endcard, padding=False)
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
See Also
--------
tofile
"""
self.tofile(
fileobj, sep="\n", endcard=endcard, padding=False, overwrite=overwrite
)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
`Header`
A new :class:`Header` instance.
"""
tmp = self.__class__(copy.copy(card) for card in self._cards)
if strip:
tmp.strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
`Header`
A new `Header` instance.
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value: str, number, complex, bool, or ``astropy.io.fits.card.Undefined``
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
# Don't try to make a temporary card though if they keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (
len(keyword) <= KEYWORD_LENGTH
and Card._keywd_FSC_RE.match(keyword)
and keyword not in self._keyword_indices
):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if new_keyword not in _commentary_keywords and new_keyword in self:
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after, replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before, after=after)
else:
self[keyword] = (value, comment)
def items(self):
    """Yield ``(keyword, value)`` pairs, like :meth:`dict.items`.

    A card whose value is UNDEFINED is reported with a value of None.
    """
    for card in self._cards:
        value = card.value
        if value == UNDEFINED:
            value = None
        yield card.keyword, value
def keys(self):
    """Yield every keyword in card order, like :meth:`dict.keys`.

    Iterating directly over the `Header` instance has the same behavior.
    """
    yield from (card.keyword for card in self._cards)
def values(self):
    """Yield every card value in order, like :meth:`dict.values`.

    A card whose value is UNDEFINED is reported as None.
    """
    for card in self._cards:
        value = card.value
        if value == UNDEFINED:
            value = None
        yield value
def pop(self, *args):
    """
    Works like :meth:`list.pop` if no arguments or an index argument are
    supplied; otherwise works like :meth:`dict.pop`.
    """
    nargs = len(args)
    if nargs > 2:
        raise TypeError(f"Header.pop expected at most 2 arguments, got {len(args)}")

    # No key means "pop the last card", matching list.pop().
    key = args[0] if nargs else -1

    try:
        value = self[key]
    except (KeyError, IndexError):
        # A second positional argument is the dict.pop-style default.
        if nargs == 2:
            return args[1]
        raise

    del self[key]
    return value
def popitem(self):
    """Remove and return the first (keyword, value) pair; like :meth:`dict.popitem`."""
    pairs = self.items()
    try:
        keyword, value = next(pairs)
    except StopIteration:
        raise KeyError("Header is empty")
    del self[keyword]
    return keyword, value
def setdefault(self, key, default=None):
    """Return ``self[key]``, inserting *default* first when the key is
    missing; like :meth:`dict.setdefault`."""
    try:
        value = self[key]
    except (KeyError, IndexError):
        # Missing keyword (or out-of-range index): store and return default.
        self[key] = default
        value = default
    return value
def update(self, *args, **kwargs):
    """
    Update the Header with new keyword values, updating the values of
    existing keywords and appending new keywords otherwise; similar to
    `dict.update`.

    `update` accepts either a dict-like object or an iterable.  In the
    former case the keys must be header keywords and the values may be
    either scalar values or (value, comment) tuples.  In the case of an
    iterable the items must be (keyword, value) tuples or (keyword, value,
    comment) tuples.

    Arbitrary arguments are also accepted, in which case the update() is
    called again with the kwargs dict as its only argument.  That is,

    ::

        >>> header.update(NAXIS1=100, NAXIS2=100)

    is equivalent to::

        header.update({'NAXIS1': 100, 'NAXIS2': 100})
    """
    # At most one positional source is consumed; kwargs are folded in by a
    # recursive call at the end.
    if args:
        other = args[0]
    else:
        other = None

    def update_from_dict(k, v):
        # Normalize one mapping entry into a Card: a scalar becomes
        # (keyword, value); a 1- or 2-tuple becomes (keyword, value[, comment]).
        if not isinstance(v, tuple):
            card = Card(k, v)
        elif 0 < len(v) <= 2:
            card = Card(*((k,) + v))
        else:
            raise ValueError(
                f"Header update value for key {k!r} is invalid; the "
                "value must be either a scalar, a 1-tuple "
                "containing the scalar value, or a 2-tuple "
                "containing the value and a comment string."
            )
        self._update(card)

    if other is None:
        # No positional source; only kwargs (if any) are applied below.
        pass
    elif isinstance(other, Header):
        # Another Header: copy its cards wholesale (preserves comments).
        for card in other.cards:
            self._update(card)
    elif hasattr(other, "items"):
        # Mapping with items(): values may be scalars or (value, comment).
        for k, v in other.items():
            update_from_dict(k, v)
    elif hasattr(other, "keys"):
        # Mapping-like object without items(): fall back to key lookup.
        for k in other.keys():
            update_from_dict(k, other[k])
    else:
        # An iterable of Cards or (keyword, value[, comment]) tuples.
        for idx, card in enumerate(other):
            if isinstance(card, Card):
                self._update(card)
            elif isinstance(card, tuple) and (1 < len(card) <= 3):
                self._update(Card(*card))
            else:
                raise ValueError(
                    f"Header update sequence item #{idx} is invalid; "
                    "the item must either be a 2-tuple containing "
                    "a keyword and value, or a 3-tuple containing "
                    "a keyword, value, and comment string."
                )
    if kwargs:
        # Apply keyword arguments by re-invoking with them as the only source.
        self.update(kwargs)
def append(self, card=None, useblanks=True, bottom=False, end=False):
    """
    Appends a new keyword+value card to the end of the Header, similar
    to `list.append`.

    By default if the last cards in the Header have commentary keywords,
    this will append the new keyword before the commentary (unless the new
    keyword is also commentary).

    Also differs from `list.append` in that it can be called with no
    arguments: In this case a blank card is appended to the end of the
    Header.  In the case all the keyword arguments are ignored.

    Parameters
    ----------
    card : str, tuple
        A keyword or a (keyword, value, [comment]) tuple representing a
        single header card; the comment is optional in which case a
        2-tuple may be used

    useblanks : bool, optional
        If there are blank cards at the end of the Header, replace the
        first blank card so that the total number of cards in the Header
        does not increase.  Otherwise preserve the number of blank cards.

    bottom : bool, optional
        If True, instead of appending after the last non-commentary card,
        append after the last non-blank card.

    end : bool, optional
        If True, ignore the useblanks and bottom options, and append at the
        very end of the Header.
    """
    # Normalize the argument into a Card instance (None -> a blank card).
    if isinstance(card, str):
        card = Card(card)
    elif isinstance(card, tuple):
        card = Card(*card)
    elif card is None:
        card = Card()
    elif not isinstance(card, Card):
        raise ValueError(
            "The value appended to a Header must be either a keyword or "
            f"(keyword, value, [comment]) tuple; got: {card!r}"
        )
    if not end and card.is_blank:
        # Blank cards should always just be appended to the end
        end = True
    if end:
        self._cards.append(card)
        idx = len(self._cards) - 1
    else:
        # Find the insertion point: skip past trailing blanks, and for
        # non-commentary keywords (unless `bottom`) also skip past trailing
        # commentary cards.
        idx = len(self._cards) - 1
        while idx >= 0 and self._cards[idx].is_blank:
            idx -= 1
        if not bottom and card.keyword not in _commentary_keywords:
            while idx >= 0 and self._cards[idx].keyword in _commentary_keywords:
                idx -= 1
        idx += 1
        self._cards.insert(idx, card)
        # Every recorded card index at or after the insertion point shifts.
        self._updateindices(idx)
    keyword = Card.normalize_keyword(card.keyword)
    self._keyword_indices[keyword].append(idx)
    if card.field_specifier is not None:
        # Record-valued keyword cards are additionally indexed by raw keyword.
        self._rvkc_indices[card.rawkeyword].append(idx)
    if not end:
        # If the appended card was a commentary card, and it was appended
        # before existing cards with the same keyword, the indices for
        # cards with that keyword may have changed
        if not bottom and card.keyword in _commentary_keywords:
            self._keyword_indices[keyword].sort()
    # Finally, if useblanks, delete a blank cards from the end
    if useblanks and self._countblanks():
        # Don't do this unless there is at least one blanks at the end
        # of the header; we need to convert the card to its string
        # image to see how long it is.  In the vast majority of cases
        # this will just be 80 (Card.length) but it may be longer for
        # CONTINUE cards
        self._useblanks(len(str(card)) // Card.length)
    self._modified = True
def extend(
    self,
    cards,
    strip=True,
    unique=False,
    update=False,
    update_first=False,
    useblanks=True,
    bottom=False,
    end=False,
):
    """
    Appends multiple keyword+value cards to the end of the header, similar
    to `list.extend`.

    Parameters
    ----------
    cards : iterable
        An iterable of (keyword, value, [comment]) tuples; see
        `Header.append`.

    strip : bool, optional
        Remove any keywords that have meaning only to specific types of
        HDUs, so that only more general keywords are added from extension
        Header or Card list (default: `True`).

    unique : bool, optional
        If `True`, ensures that no duplicate keywords are appended;
        keywords already in this header are simply discarded.  The
        exception is commentary keywords (COMMENT, HISTORY, etc.): they are
        only treated as duplicates if their values match.

    update : bool, optional
        If `True`, update the current header with the values and comments
        from duplicate keywords in the input header.  This supersedes the
        ``unique`` argument.  Commentary keywords are treated the same as
        if ``unique=True``.

    update_first : bool, optional
        If the first keyword in the header is 'SIMPLE', and the first
        keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
        replaced by the 'XTENSION' keyword.  Likewise if the first keyword
        in the header is 'XTENSION' and the first keyword in the input
        header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
        'SIMPLE' keyword.  This behavior is otherwise dumb as to whether or
        not the resulting header is a valid primary or extension header.
        This is mostly provided to support backwards compatibility with the
        old ``Header.fromTxtFile`` method, and only applies if
        ``update=True``.

    useblanks, bottom, end : bool, optional
        These arguments are passed to :meth:`Header.append` while appending
        new cards to the header.
    """
    # Normalize the input into a Header so we can iterate proper Cards.
    temp = self.__class__(cards)
    if strip:
        temp.strip()
    if len(self):
        first = self._cards[0].keyword
    else:
        first = None
    # We don't immediately modify the header, because first we need to sift
    # out any duplicates in the new header prior to adding them to the
    # existing header, but while *allowing* duplicates from the header
    # being extended from (see ticket #156)
    extend_cards = []
    for idx, card in enumerate(temp.cards):
        keyword = card.keyword
        if keyword not in _commentary_keywords:
            if unique and not update and keyword in self:
                # Duplicate of an existing keyword: discard it.
                continue
            elif update:
                if idx == 0 and update_first:
                    # Dumbly update the first keyword to either SIMPLE or
                    # XTENSION as the case may be, as was in the case in
                    # Header.fromTxtFile
                    if (keyword == "SIMPLE" and first == "XTENSION") or (
                        keyword == "XTENSION" and first == "SIMPLE"
                    ):
                        del self[0]
                        self.insert(0, card)
                    else:
                        self[keyword] = (card.value, card.comment)
                elif keyword in self:
                    # Update the existing card in place.
                    self[keyword] = (card.value, card.comment)
                else:
                    extend_cards.append(card)
            else:
                extend_cards.append(card)
        else:
            # Commentary keywords are only considered duplicates when their
            # values match an existing card of the same keyword.
            if (unique or update) and keyword in self:
                if card.is_blank:
                    extend_cards.append(card)
                    continue
                for value in self[keyword]:
                    if value == card.value:
                        break
                else:
                    extend_cards.append(card)
            else:
                extend_cards.append(card)
    for card in extend_cards:
        self.append(card, useblanks=useblanks, bottom=bottom, end=end)
def count(self, keyword):
    """
    Returns the count of the given keyword in the header, similar to
    `list.count` if the Header object is treated as a list of keywords.

    Parameters
    ----------
    keyword : str
        The keyword to count instances of in the header
    """
    keyword = Card.normalize_keyword(keyword)
    # Look before we leap: _keyword_indices is a defaultdict, so a plain
    # subscript would silently create an entry for a missing keyword.
    indices = self._keyword_indices.get(keyword)
    if indices is None:
        raise KeyError(f"Keyword {keyword!r} not found.")
    return len(indices)
def index(self, keyword, start=None, stop=None):
    """
    Returns the index if the first instance of the given keyword in the
    header, similar to `list.index` if the Header object is treated as a
    list of keywords.

    Parameters
    ----------
    keyword : str
        The keyword to look up in the list of all keywords in the header

    start : int, optional
        The lower bound for the index

    stop : int, optional
        The upper bound for the index
    """
    start = 0 if start is None else start
    stop = len(self._cards) if stop is None else stop
    # A stop below start means "search backwards" from start toward stop.
    step = -1 if stop < start else 1
    norm_keyword = Card.normalize_keyword(keyword)
    for idx in range(start, stop, step):
        if self._cards[idx].keyword.upper() == norm_keyword:
            return idx
    raise ValueError(f"The keyword {keyword!r} is not in the header.")
def insert(self, key, card, useblanks=True, after=False):
    """
    Inserts a new keyword+value card into the Header at a given location,
    similar to `list.insert`.

    New keywords can also be inserted relative to existing keywords
    using, for example::

        >>> header = Header({"NAXIS1": 10})
        >>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))

    to insert before an existing keyword, or::

        >>> header.insert('NAXIS1', ('NAXIS2', 4096), after=True)

    to insert after an existing keyword.

    Parameters
    ----------
    key : int, str, or tuple
        The index into the list of header keywords before which the
        new keyword should be inserted, or the name of a keyword before
        which the new keyword should be inserted.  Can also accept a
        (keyword, index) tuple for inserting around duplicate keywords.

    card : str, tuple
        A keyword or a (keyword, value, [comment]) tuple; see
        `Header.append`

    useblanks : bool, optional
        If there are blank cards at the end of the Header, replace the
        first blank card so that the total number of cards in the Header
        does not increase.  Otherwise preserve the number of blank cards.

    after : bool, optional
        If set to `True`, insert *after* the specified index or keyword,
        rather than before it.  Defaults to `False`.
    """
    if not isinstance(key, numbers.Integral):
        # Don't pass through ints to _cardindex because it will not take
        # kindly to indices outside the existing number of cards in the
        # header, which insert needs to be able to support (for example
        # when inserting into empty headers)
        idx = self._cardindex(key)
    else:
        idx = key
    if after:
        if idx == -1:
            idx = len(self._cards)
        else:
            idx += 1
    if idx >= len(self._cards):
        # This is just an append (Though it must be an append absolutely to
        # the bottom, ignoring blanks, etc.--the point of the insert method
        # is that you get exactly what you asked for with no surprises)
        self.append(card, end=True)
        return
    # Normalize the card argument into a Card instance.
    if isinstance(card, str):
        card = Card(card)
    elif isinstance(card, tuple):
        card = Card(*card)
    elif not isinstance(card, Card):
        raise ValueError(
            "The value inserted into a Header must be either a keyword or "
            f"(keyword, value, [comment]) tuple; got: {card!r}"
        )
    self._cards.insert(idx, card)
    keyword = card.keyword
    # If idx was < 0, determine the actual index according to the rules
    # used by list.insert()
    if idx < 0:
        idx += len(self._cards) - 1
        idx = max(idx, 0)
    # All the keyword indices above the insertion point must be updated
    self._updateindices(idx)
    keyword = Card.normalize_keyword(keyword)
    self._keyword_indices[keyword].append(idx)
    count = len(self._keyword_indices[keyword])
    if count > 1:
        # There were already keywords with this same name
        if keyword not in _commentary_keywords:
            warnings.warn(
                f"A {keyword!r} keyword already exists in this header. Inserting "
                "duplicate keyword.",
                AstropyUserWarning,
            )
        # Keep per-keyword index lists sorted so index 0 is the first card.
        self._keyword_indices[keyword].sort()
    if card.field_specifier is not None:
        # Update the index of RVKC as well
        rvkc_indices = self._rvkc_indices[card.rawkeyword]
        rvkc_indices.append(idx)
        rvkc_indices.sort()
    if useblanks:
        self._useblanks(len(str(card)) // Card.length)
    self._modified = True
def remove(self, keyword, ignore_missing=False, remove_all=False):
    """
    Removes the first instance of the given keyword from the header similar
    to `list.remove` if the Header object is treated as a list of keywords.

    Parameters
    ----------
    keyword : str
        The keyword of which to remove the first instance in the header.

    ignore_missing : bool, optional
        When True, ignores missing keywords.  Otherwise, if the keyword
        is not present in the header a KeyError is raised.

    remove_all : bool, optional
        When True, all instances of keyword will be removed.
        Otherwise only the first instance of the given keyword is removed.
    """
    keyword = Card.normalize_keyword(keyword)
    if keyword not in self._keyword_indices:
        if ignore_missing:
            return
        raise KeyError(f"Keyword '{keyword}' not found.")
    # Delete the first occurrence; with remove_all, keep going until the
    # keyword disappears from the index entirely.
    del self[self._keyword_indices[keyword][0]]
    if remove_all:
        while keyword in self._keyword_indices:
            del self[self._keyword_indices[keyword][0]]
def rename_keyword(self, oldkeyword, newkeyword, force=False):
    """
    Rename a card's keyword in the header.

    Parameters
    ----------
    oldkeyword : str or int
        Old keyword or card index

    newkeyword : str
        New keyword

    force : bool, optional
        When `True`, if the new keyword already exists in the header, force
        the creation of a duplicate keyword.  Otherwise a
        `ValueError` is raised.
    """
    old = Card.normalize_keyword(oldkeyword)
    new = Card.normalize_keyword(newkeyword)
    if new == "CONTINUE":
        raise ValueError("Can not rename to CONTINUE")

    old_is_commentary = old in _commentary_keywords
    new_is_commentary = new in _commentary_keywords
    if old_is_commentary or new_is_commentary:
        # Renaming is only allowed within the commentary family.
        if not (old_is_commentary and new_is_commentary):
            raise ValueError(
                "Regular and commentary keys can not be renamed to each other."
            )
    elif not force and new in self:
        raise ValueError(f"Intended keyword {new} already exists in header.")

    # Replace the card in place, keeping its value and comment.
    idx = self.index(old)
    card = self._cards[idx]
    del self[idx]
    self.insert(idx, (new, card.value, card.comment))
def add_history(self, value, before=None, after=None):
    """
    Add a ``HISTORY`` card.

    Parameters
    ----------
    value : str
        History text to be added.

    before : str or int, optional
        Same as in `Header.update`

    after : str or int, optional
        Same as in `Header.update`
    """
    # Thin wrapper; placement and long-value splitting are handled by
    # _add_commentary.
    self._add_commentary("HISTORY", value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
    """
    Add a ``COMMENT`` card.

    Parameters
    ----------
    value : str
        Text to be added.

    before : str or int, optional
        Same as in `Header.update`

    after : str or int, optional
        Same as in `Header.update`
    """
    # Thin wrapper; placement and long-value splitting are handled by
    # _add_commentary.
    self._add_commentary("COMMENT", value, before=before, after=after)
def add_blank(self, value="", before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("", value, before=before, after=after)
def strip(self):
    """
    Strip cards specific to a certain kind of header.

    Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
    the header can be used to reconstruct another kind of header.

    .. note:: This deletes all HDU-type-specific cards unconditionally
       (missing ones are ignored) rather than checking the HDU type first;
       a future extension point could let HDU classes declare their own
       type-specific keywords.
    """
    naxis = self.get("NAXIS", 0)
    tfields = self.get("TFIELDS", 0)

    # Axis-size keywords: NAXIS1..NAXISn.
    for n in range(1, naxis + 1):
        self.remove(f"NAXIS{n}", ignore_missing=True)

    # Table-column keywords: TFORM1..TFORMn, TSCAL1.., etc.
    table_prefixes = (
        "TFORM",
        "TSCAL",
        "TZERO",
        "TNULL",
        "TTYPE",
        "TUNIT",
        "TDISP",
        "TDIM",
        "THEAP",
        "TBCOL",
    )
    for prefix in table_prefixes:
        for n in range(1, tfields + 1):
            self.remove(f"{prefix}{n}", ignore_missing=True)

    # Structural keywords shared by primary/extension headers.
    for name in (
        "SIMPLE",
        "XTENSION",
        "BITPIX",
        "NAXIS",
        "EXTEND",
        "PCOUNT",
        "GCOUNT",
        "GROUPS",
        "BSCALE",
        "BZERO",
        "TFIELDS",
    ):
        self.remove(name, ignore_missing=True)
@property
def data_size(self):
    """
    Return the size (in bytes) of the data portion following the `Header`.
    """
    # Delegates to the module-level helper, which derives the size from the
    # header's own keywords.
    return _hdr_data_size(self)
@property
def data_size_padded(self):
    """
    Return the size (in bytes) of the data portion following the `Header`
    including padding.
    """
    # _pad_length returns the number of fill bytes needed to round the data
    # section up to a full FITS block.
    size = self.data_size
    return size + _pad_length(size)
def _update(self, card):
    """
    The real update code.  If keyword already exists, its value and/or
    comment will be updated.  Otherwise a new card will be appended.

    This will not create a duplicate keyword except in the case of
    commentary cards.  The only other way to force creation of a duplicate
    is to use the insert(), append(), or extend() methods.
    """
    keyword, value, comment = card

    # Lookups for existing/known keywords are case-insensitive
    keyword = keyword.strip().upper().removeprefix("HIERARCH ")

    if keyword not in _commentary_keywords and keyword in self._keyword_indices:
        # Easy; just update the value/comment
        idx = self._keyword_indices[keyword][0]
        existing_card = self._cards[idx]
        existing_card.value = value
        if comment is not None:
            # '' should be used to explicitly blank a comment
            existing_card.comment = comment
        if existing_card._modified:
            self._modified = True
    elif keyword in _commentary_keywords:
        # Commentary values can exceed one card; split into as many cards
        # as needed.
        cards = self._splitcommentary(keyword, value)
        if keyword in self._keyword_indices:
            # Append after the last keyword of the same type
            idx = self.index(keyword, start=len(self) - 1, stop=-1)
            isblank = not (keyword or value or comment)
            # Insert in reverse so the cards end up in original order.
            for c in reversed(cards):
                self.insert(idx + 1, c, useblanks=(not isblank))
        else:
            for c in cards:
                self.append(c, bottom=True)
    else:
        # A new keyword! self.append() will handle updating _modified
        self.append(card)
def _cardindex(self, key):
    """Returns an index into the ._cards list given a valid lookup key.

    Accepts a keyword string, an integer index, a slice (returned
    unchanged for the caller to handle), or a (keyword, n) tuple selecting
    the n-th duplicate of a keyword.
    """
    # This used to just set key = (key, 0) and then go on to act as if the
    # user passed in a tuple, but it's much more common to just be given a
    # string as the key, so optimize more for that case
    if isinstance(key, str):
        keyword = key
        n = 0
    elif isinstance(key, numbers.Integral):
        # If < 0, determine the actual index
        if key < 0:
            key += len(self._cards)
        if key < 0 or key >= len(self._cards):
            raise IndexError("Header index out of range.")
        return key
    elif isinstance(key, slice):
        # Slices pass through untouched; slicing is resolved by the caller.
        return key
    elif isinstance(key, tuple):
        if (
            len(key) != 2
            or not isinstance(key[0], str)
            or not isinstance(key[1], numbers.Integral)
        ):
            raise ValueError(
                "Tuple indices must be 2-tuples consisting of a "
                "keyword string and an integer index."
            )
        keyword, n = key
    else:
        raise ValueError(
            "Header indices must be either a string, a 2-tuple, or an integer."
        )

    keyword = Card.normalize_keyword(keyword)
    # Returns the index into _cards for the n-th card with the given
    # keyword (where n is 0-based)
    indices = self._keyword_indices.get(keyword, None)

    if keyword and not indices:
        if len(keyword) > KEYWORD_LENGTH or "." in keyword:
            raise KeyError(f"Keyword {keyword!r} not found.")
        else:
            # Maybe it's a RVKC?
            indices = self._rvkc_indices.get(keyword, None)

        if not indices:
            raise KeyError(f"Keyword {keyword!r} not found.")

    try:
        return indices[n]
    except IndexError:
        raise IndexError(
            f"There are only {len(indices)} {keyword!r} cards in the header."
        )
def _keyword_from_index(self, idx):
    """
    Given an integer index, return the (keyword, repeat) tuple that index
    refers to.  For most keywords the repeat will always be zero, but it
    may be greater than zero for keywords that are duplicated (especially
    commentary keywords).

    In a sense this is the inverse of self.index, except that it also
    supports duplicates.
    """
    if idx < 0:
        # Resolve a negative index the way list indexing does.
        idx += len(self._cards)
    keyword = Card.normalize_keyword(self._cards[idx].keyword)
    # The repeat count is this card's position among all cards sharing
    # the same keyword.
    repeat = self._keyword_indices[keyword].index(idx)
    return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
    """
    Inserts a new card before or after an existing card; used to
    implement support for the legacy before/after keyword arguments to
    Header.update().

    If replace=True, move an existing card with the same keyword.
    """
    # `before` wins over `after` when both are given (matches Header.set docs).
    if before is None:
        insertionkey = after
    else:
        insertionkey = before

    def get_insertion_idx():
        # Resolve the anchor into a card index.  An integer anchor past the
        # end of the header is allowed and means "append".
        if not (
            isinstance(insertionkey, numbers.Integral)
            and insertionkey >= len(self._cards)
        ):
            idx = self._cardindex(insertionkey)
        else:
            idx = insertionkey

        if before is None:
            # Inserting *after* the anchor: shift one position right.
            idx += 1

        return idx

    if replace:
        # The card presumably already exists somewhere in the header.
        # Check whether or not we actually have to move it; if it does need
        # to be moved we just delete it and then it will be reinserted
        # below
        old_idx = self._cardindex(card.keyword)
        insertion_idx = get_insertion_idx()

        if insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1:
            # The card would be appended to the end, but it's already at
            # the end
            return

        if before is not None:
            if old_idx == insertion_idx - 1:
                return
        elif after is not None and old_idx == insertion_idx:
            return

        del self[old_idx]

    # Even if replace=True, the insertion idx may have changed since the
    # old card was deleted
    idx = get_insertion_idx()

    if card[0] in _commentary_keywords:
        # A long commentary value expands into multiple cards; inserting
        # them in reverse at a fixed index preserves their order.
        cards = reversed(self._splitcommentary(card[0], card[1]))
    else:
        cards = [card]

    for c in cards:
        self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
For all cards with index above idx, increment or decrement its index
value in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return isinstance(keyword, str) and (
keyword.endswith("...") or "*" in keyword or "?" in keyword
)
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
"""
pattern = pattern.replace("*", r".*").replace("?", r".")
pattern = pattern.replace("...", r"\S*") + "$"
match_pattern = re.compile(pattern, re.IGNORECASE).match
return [i for i, card in enumerate(self._cards) if match_pattern(card.keyword)]
def _set_slice(self, key, value, target):
    """
    Used to implement Header.__setitem__ and CardAccessor.__setitem__.

    Returns True when *key* was a slice or wildcard pattern (in which case
    the assignment has been performed here); False tells the caller to
    handle a single-key assignment itself.
    """
    if isinstance(key, slice) or self._haswildcard(key):
        if isinstance(key, slice):
            indices = range(*key.indices(len(target)))
        else:
            indices = self._wildcardmatch(key)
        # Broadcast a scalar (or a single string) value to every matched card.
        if isinstance(value, str) or not np.iterable(value):
            value = itertools.repeat(value, len(indices))
        for idx, val in zip(indices, value):
            target[idx] = val
        return True
    return False
def _splitcommentary(self, keyword, value):
    """
    Given a commentary keyword and value, returns a list of the one or more
    cards needed to represent the full value.  This is primarily used to
    create the multiple commentary cards needed to represent a long value
    that won't fit into a single commentary card.
    """
    # Space remaining on each card once the keyword field (which may be
    # padded out to the full keyword width) is accounted for.
    maxlen = Card.length - KEYWORD_LENGTH
    valuestr = str(value)
    if len(valuestr) <= maxlen:
        # The whole value fits on one card; keep the original value object.
        return [Card(keyword, value)]
    # Chop the string form into consecutive maxlen-sized pieces.
    return [
        Card(keyword, valuestr[pos : pos + maxlen])
        for pos in range(0, len(valuestr), maxlen)
    ]
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before, after=after)
else:
self[key] = value
# Header supports both index-based and keyword-based access, so register it
# as a virtual subclass of both ABCs for isinstance()/issubclass() checks.
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
|
Header
|
python
|
doocs__leetcode
|
solution/2000-2099/2011.Final Value of Variable After Performing Operations/Solution.py
|
{
"start": 0,
"end": 152
}
|
class ____:
def finalValueAfterOperations(self, operations: List[str]) -> int:
return sum(1 if s[1] == '+' else -1 for s in operations)
|
Solution
|
python
|
doocs__leetcode
|
solution/2300-2399/2393.Count Strictly Increasing Subarrays/Solution.py
|
{
"start": 0,
"end": 259
}
|
class ____:
def countSubarrays(self, nums: List[int]) -> int:
ans = cnt = 1
for x, y in pairwise(nums):
if x < y:
cnt += 1
else:
cnt = 1
ans += cnt
return ans
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/integrations/jira/test_utils.py
|
{
"start": 237,
"end": 723
}
|
class ____(TestCase):
    # Tests for build_user_choice, which turns a Jira user API payload into a
    # (user id, display name) tuple keyed on the given id field.

    def test_jira_cloud(self) -> None:
        # Jira Cloud payloads identify users by the `accountId` field.
        user_response = StubService.get_stub_data("jira", "user.json")
        assert build_user_choice(user_response, "accountId") == (
            "012345:00000000-1111-2222-3333-444444444444",
            "Saif Hakim",
        )

    def test_unexpected_id(self) -> None:
        # An id field the helper does not expect yields None instead of raising.
        user_response = StubService.get_stub_data("jira", "user.json")
        assert build_user_choice(user_response, "name") is None
BuildUserChoiceTest
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py
|
{
"start": 1682,
"end": 6812
}
|
class ____:
    """Outcome of scanning for dg config files from a starting path.

    Bundles the discovered root/workspace/user config file locations, their
    validation results, and any CLI-config warning produced during discovery.
    """

    # Directory treated as the project/workspace root (cwd when no file found).
    root_path: Path
    workspace_root_path: Optional[Path] = None
    root_file_path: Optional[Path] = None
    root_validation_result: Optional["DgConfigValidationResult"] = None
    # Set only when the enclosing workspace file differs from the root file.
    container_workspace_file_path: Optional[Path] = None
    container_workspace_validation_result: Optional["DgConfigValidationResult"] = None
    user_file_path: Optional[Path] = None
    user_config: Optional["DgRawCliConfig"] = None
    # NOTE(review): this field has no default yet follows defaulted fields; if
    # this class is decorated with @dataclass that ordering raises at class
    # creation — confirm the decorator/field order in the full file.
    cli_config_warning: Optional[str]

    @property
    def has_root_file(self) -> bool:
        """Whether a root (project or workspace) config file was found."""
        return self.root_file_path is not None

    @property
    def has_container_workspace_file(self) -> bool:
        """Whether a separate enclosing workspace config file was found."""
        return self.container_workspace_file_path is not None

    @property
    def has_user_file(self) -> bool:
        """Whether a user-level config file exists."""
        return self.user_file_path is not None

    @property
    def root_result(self) -> "DgConfigValidationResult":
        """Root validation result; raises DgError when none is available."""
        if not self.root_validation_result:
            raise DgError("No root file validation result available.")
        return self.root_validation_result

    @property
    def container_workspace_result(self) -> "DgConfigValidationResult":
        """Container workspace validation result; raises DgError when absent."""
        if not self.container_workspace_validation_result:
            raise DgError("No container workspace validation result available.")
        return self.container_workspace_validation_result

    @property
    def root_type(self) -> Optional[str]:
        """The root config's declared type, or None when no root file exists."""
        if not self.root_file_path:
            return None
        return self.root_result.type

    @property
    def root_config(self) -> Optional["DgFileConfig"]:
        """The validated root config, or None when no validation result exists."""
        if not self.root_validation_result:
            return None
        return self.root_validation_result.config

    @property
    def container_workspace_config(self) -> Optional["DgWorkspaceFileConfig"]:
        """The validated enclosing-workspace config, or None when absent."""
        if not self.container_workspace_validation_result:
            return None
        return cast("DgWorkspaceFileConfig", self.container_workspace_validation_result.config)
def discover_and_validate_config_files(path: Path) -> DgConfigFileDiscoveryResult:
    """Locate and validate the dg config files relevant to *path*.

    Finds the nearest config file, the nearest *workspace*-typed config file,
    and the user-level config (when present), validates each, and bundles
    everything into a DgConfigFileDiscoveryResult.
    """
    root_config_path = discover_config_file(path)
    # Separately look for the nearest config file declaring a workspace.
    workspace_config_path = discover_config_file(
        path, lambda x: bool(x.get("directory_type") == "workspace")
    )
    cli_config_warning: Optional[str] = None
    if root_config_path:
        root_path = root_config_path.parent
        root_file_validation_result = validate_dg_file_config(root_config_path)
        if workspace_config_path is None:
            workspace_root_path = None
            container_workspace_validation_result = None
        # Only load the workspace config if the workspace root is different from the first
        # detected root.
        elif workspace_config_path == root_config_path:
            workspace_root_path = workspace_config_path.parent
            container_workspace_validation_result = None
        else:
            workspace_root_path = workspace_config_path.parent
            container_workspace_validation_result = validate_dg_file_config(workspace_config_path)
            # A project inside a workspace must not carry its own CLI config;
            # drop it and emit a warning for the caller.
            if (
                not root_file_validation_result.has_errors
                and "cli" in root_file_validation_result.config
            ):
                del root_file_validation_result.config["cli"]
                # We have to emit this _after_ we merge all configs to ensure we have the right
                # suppression list.
                cli_config_warning = generate_tool_dg_cli_in_project_in_workspace_error_message(
                    root_path, workspace_root_path
                )
    else:
        # No config file anywhere above *path*: fall back to the cwd as root.
        root_path = Path.cwd()
        workspace_root_path = None
        root_file_validation_result = None
        container_workspace_validation_result = None
    if has_dg_user_file_config():
        user_config = load_dg_user_file_config()
    else:
        user_config = None
    return DgConfigFileDiscoveryResult(
        root_path=root_path,
        workspace_root_path=workspace_root_path,
        root_file_path=root_config_path,
        root_validation_result=root_file_validation_result,
        # Only report a *container* workspace file when it differs from root.
        container_workspace_file_path=workspace_config_path
        if root_path != workspace_root_path
        else None,
        container_workspace_validation_result=container_workspace_validation_result,
        user_file_path=get_dg_config_path() if has_dg_user_file_config() else None,
        user_config=user_config,
        cli_config_warning=cli_config_warning,
    )
# NOTE: The presence of dg.toml will cause pyproject.toml to be ignored for purposes of dg config.

# Key under which the resolved CLI config is stashed on the click context obj.
_CLI_CONTEXT_CONFIG_KEY = "config"


def set_config_on_cli_context(cli_context: click.Context, config: "DgRawCliConfig") -> None:
    """Store the resolved CLI config on the click context object."""
    state = cli_context.ensure_object(dict)
    state[_CLI_CONTEXT_CONFIG_KEY] = config


def has_config_on_cli_context(cli_context: click.Context) -> bool:
    """Report whether a CLI config has already been stored on the context."""
    state = cli_context.ensure_object(dict)
    return _CLI_CONTEXT_CONFIG_KEY in state


def get_config_from_cli_context(cli_context: click.Context) -> "DgRawCliConfig":
    """Return the CLI config previously stored via set_config_on_cli_context."""
    state = cli_context.ensure_object(dict)
    return state[_CLI_CONTEXT_CONFIG_KEY]
# ########################
# ##### MAIN
# ########################
@dataclass
|
DgConfigFileDiscoveryResult
|
python
|
fluentpython__example-code-2e
|
21-async/domains/curio/domainlib.py
|
{
"start": 143,
"end": 646
}
|
class ____(NamedTuple):
    """Outcome of a DNS probe for one domain."""

    # The domain name that was probed.
    domain: str
    # True when getaddrinfo resolved the name; False when it raised gaierror.
    found: bool
async def probe(domain: str) -> Result:
    """Check whether *domain* resolves; never raises for resolution failures."""
    try:
        await socket.getaddrinfo(domain, None)
    except socket.gaierror:
        # Name did not resolve.
        return Result(domain, False)
    return Result(domain, True)
async def multi_probe(domains: Iterable[str]) -> AsyncIterator[Result]:
    """Probe all *domains* concurrently, yielding each Result as it arrives."""
    async with TaskGroup() as group:
        # Spawn one probe task per domain.
        for domain in domains:
            await group.spawn(probe, domain)
        # NOTE(review): iterating the group appears to yield tasks as they
        # complete (curio TaskGroup semantics) — confirm against curio docs.
        async for task in group:
            yield task.result
|
Result
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/regression/liblinear_svr.py
|
{
"start": 564,
"end": 3916
}
|
class ____(AutoSklearnRegressionAlgorithm):
# Liblinear is not deterministic as it uses a RNG inside
def __init__(
    self,
    loss,
    epsilon,
    dual,
    tol,
    C,
    fit_intercept,
    intercept_scaling,
    random_state=None,
):
    """Store LinearSVR hyperparameters; the estimator is built lazily in fit().

    Parameters mirror sklearn.svm.LinearSVR.  Values may arrive as strings
    from the configuration system and are coerced to their proper types in
    fit().
    """
    self.epsilon = epsilon
    self.loss = loss
    self.dual = dual
    self.tol = tol
    self.C = C
    self.fit_intercept = fit_intercept
    self.intercept_scaling = intercept_scaling
    self.random_state = random_state
    # No estimator until fit() is called; predict() guards on this.
    self.estimator = None
def fit(self, X, y):
import sklearn.svm
self.C = float(self.C)
self.tol = float(self.tol)
self.epsilon = float(self.epsilon)
self.dual = check_for_bool(self.dual)
self.fit_intercept = check_for_bool(self.fit_intercept)
self.intercept_scaling = float(self.intercept_scaling)
self.estimator = sklearn.svm.LinearSVR(
epsilon=self.epsilon,
loss=self.loss,
dual=self.dual,
tol=self.tol,
C=self.C,
fit_intercept=self.fit_intercept,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "Liblinear-SVR",
"name": "Liblinear Support Vector Regression",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": False,
"input": (SPARSE, DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
C = UniformFloatHyperparameter("C", 0.03125, 32768, log=True, default_value=1.0)
loss = CategoricalHyperparameter(
"loss",
["epsilon_insensitive", "squared_epsilon_insensitive"],
default_value="squared_epsilon_insensitive",
)
# Random Guess
epsilon = UniformFloatHyperparameter(
name="epsilon", lower=0.001, upper=1, default_value=0.1, log=True
)
dual = Constant("dual", "False")
# These are set ad-hoc
tol = UniformFloatHyperparameter(
"tol", 1e-5, 1e-1, default_value=1e-4, log=True
)
fit_intercept = Constant("fit_intercept", "True")
intercept_scaling = Constant("intercept_scaling", 1)
cs.add_hyperparameters(
[C, loss, epsilon, dual, tol, fit_intercept, intercept_scaling]
)
dual_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(dual, "False"),
ForbiddenEqualsClause(loss, "epsilon_insensitive"),
)
cs.add_forbidden_clause(dual_and_loss)
return cs
|
LibLinear_SVR
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/distributions/gamma.py
|
{
"start": 10078,
"end": 12218
}
|
class ____(Gamma):
"""`Gamma` with softplus of `concentration` and `rate`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Gamma(tf.nn.softplus(concentration), "
"tf.nn.softplus(rate))` instead.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="GammaWithSoftplusConcentrationRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
super(GammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
"""Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations.
Default is "kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
"""
with ops.name_scope(name, "kl_gamma_gamma", values=[
g0.concentration, g0.rate, g1.concentration, g1.rate]):
# Result from:
# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
# For derivation see:
# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
return (((g0.concentration - g1.concentration)
* math_ops.digamma(g0.concentration))
+ math_ops.lgamma(g1.concentration)
- math_ops.lgamma(g0.concentration)
+ g1.concentration * math_ops.log(g0.rate)
- g1.concentration * math_ops.log(g1.rate)
+ g0.concentration * (g1.rate / g0.rate - 1.))
|
GammaWithSoftplusConcentrationRate
|
python
|
scrapy__scrapy
|
tests/test_spidermiddleware.py
|
{
"start": 14199,
"end": 16814
}
|
class ____:
@pytest.fixture
def crawler(self) -> Crawler:
return get_crawler(Spider)
@pytest.fixture
def mwman(self, crawler: Crawler) -> SpiderMiddlewareManager:
return SpiderMiddlewareManager.from_crawler(crawler)
def test_simple_mw(self, mwman: SpiderMiddlewareManager) -> None:
mw = ProcessSpiderOutputSimpleMiddleware()
mwman._add_middleware(mw)
assert (
mwman.methods["process_spider_output"][0] == mw.process_spider_output # pylint: disable=comparison-with-callable
)
def test_async_mw(self, mwman: SpiderMiddlewareManager) -> None:
mw = ProcessSpiderOutputAsyncGenMiddleware()
mwman._add_middleware(mw)
assert (
mwman.methods["process_spider_output"][0] == mw.process_spider_output # pylint: disable=comparison-with-callable
)
def test_universal_mw(self, mwman: SpiderMiddlewareManager) -> None:
mw = ProcessSpiderOutputUniversalMiddleware()
mwman._add_middleware(mw)
assert mwman.methods["process_spider_output"][0] == (
mw.process_spider_output,
mw.process_spider_output_async,
)
def test_universal_mw_no_sync(
self, mwman: SpiderMiddlewareManager, caplog: pytest.LogCaptureFixture
) -> None:
mwman._add_middleware(UniversalMiddlewareNoSync())
assert (
"UniversalMiddlewareNoSync has process_spider_output_async"
" without process_spider_output" in caplog.text
)
assert mwman.methods["process_spider_output"][0] is None
def test_universal_mw_both_sync(
self, mwman: SpiderMiddlewareManager, caplog: pytest.LogCaptureFixture
) -> None:
mw = UniversalMiddlewareBothSync()
mwman._add_middleware(mw)
assert (
"UniversalMiddlewareBothSync.process_spider_output_async "
"is not an async generator function" in caplog.text
)
assert (
mwman.methods["process_spider_output"][0] == mw.process_spider_output # pylint: disable=comparison-with-callable
)
def test_universal_mw_both_async(
self, mwman: SpiderMiddlewareManager, caplog: pytest.LogCaptureFixture
) -> None:
mwman._add_middleware(UniversalMiddlewareBothAsync())
assert (
"UniversalMiddlewareBothAsync.process_spider_output "
"is an async generator function while process_spider_output_async exists"
in caplog.text
)
assert mwman.methods["process_spider_output"][0] is None
|
TestUniversalMiddlewareManager
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_quantization.py
|
{
"start": 80166,
"end": 80511
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.rand((5, 5))
self.bias = torch.zeros(5)
def forward(self, x):
return F.linear(x, self.weight, self.bias)
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 5),)
|
FunctionalLinear
|
python
|
prompt-toolkit__python-prompt-toolkit
|
tests/test_print_formatted_text.py
|
{
"start": 359,
"end": 3039
}
|
class ____:
"Emulate an stdout object."
def __init__(self):
self._data = []
def write(self, data):
self._data.append(data)
@property
def data(self):
return "".join(self._data)
def flush(self):
pass
def isatty(self):
return True
def fileno(self):
# File descriptor is not used for printing formatted text.
# (It is only needed for getting the terminal size.)
return -1
@pytest.mark.skipif(is_windows(), reason="Doesn't run on Windows yet.")
def test_print_formatted_text():
f = _Capture()
pt_print([("", "hello"), ("", "world")], file=f)
assert "hello" in f.data
assert "world" in f.data
@pytest.mark.skipif(is_windows(), reason="Doesn't run on Windows yet.")
def test_print_formatted_text_backslash_r():
f = _Capture()
pt_print("hello\r\n", file=f)
assert "hello" in f.data
@pytest.mark.skipif(is_windows(), reason="Doesn't run on Windows yet.")
def test_formatted_text_with_style():
f = _Capture()
style = Style.from_dict(
{
"hello": "#ff0066",
"world": "#44ff44 italic",
}
)
tokens = FormattedText(
[
("class:hello", "Hello "),
("class:world", "world"),
]
)
# NOTE: We pass the default (8bit) color depth, so that the unit tests
# don't start failing when environment variables change.
pt_print(tokens, style=style, file=f, color_depth=ColorDepth.DEFAULT)
assert "\x1b[0;38;5;197mHello" in f.data
assert "\x1b[0;38;5;83;3mworld" in f.data
@pytest.mark.skipif(is_windows(), reason="Doesn't run on Windows yet.")
def test_html_with_style():
"""
Text `print_formatted_text` with `HTML` wrapped in `to_formatted_text`.
"""
f = _Capture()
html = HTML("<ansigreen>hello</ansigreen> <b>world</b>")
formatted_text = to_formatted_text(html, style="class:myhtml")
pt_print(formatted_text, file=f, color_depth=ColorDepth.DEFAULT)
assert (
f.data
== "\x1b[0m\x1b[?7h\x1b[0;32mhello\x1b[0m \x1b[0;1mworld\x1b[0m\r\n\x1b[0m"
)
@pytest.mark.skipif(is_windows(), reason="Doesn't run on Windows yet.")
def test_print_formatted_text_with_dim():
"""
Test that dim formatting works correctly.
"""
f = _Capture()
style = Style.from_dict(
{
"dimtext": "dim",
}
)
tokens = FormattedText([("class:dimtext", "dim text")])
pt_print(tokens, style=style, file=f, color_depth=ColorDepth.DEFAULT)
# Check that the ANSI dim escape code (ESC[2m) is in the output
assert "\x1b[0;2m" in f.data or "\x1b[2m" in f.data
|
_Capture
|
python
|
google__jax
|
tests/api_test.py
|
{
"start": 257174,
"end": 257710
}
|
class ____(jtu.JaxTestCase):
@unittest.skipIf(not sys.executable, "test requires sys.executable")
@jtu.run_on_devices("cpu")
def test_no_backend_warning_on_cpu_if_platform_specified(self):
warning_not_expected = (
"import jax; "
"jax.config.update('jax_platform_name', 'cpu'); "
"jax.numpy.arange(10)")
result = subprocess.run([sys.executable, '-c', warning_not_expected],
check=True, capture_output=True)
assert "may be present" not in result.stderr.decode()
|
BackendsTest
|
python
|
PyCQA__pylint
|
tests/functional/u/unused/unused_private_member.py
|
{
"start": 2370,
"end": 2780
}
|
class ____:
"""Regression test for 4644"""
__seventyseven = 77
__ninetyone = 91
def __init__(self):
self.twentyone = 21 * (1 / (self.__seventyseven + 33)) % 100
self.ninetyfive = Klass.__ninetyone + 4
k = Klass()
print(k.twentyone)
print(k.ninetyfive)
# https://github.com/pylint-dev/pylint/issues/4657
# Mutation of class member with cls should not fire a false-positive
|
Klass
|
python
|
pypa__warehouse
|
warehouse/oidc/views.py
|
{
"start": 1342,
"end": 1403
}
|
class ____(TypedDict):
code: str
description: str
|
Error
|
python
|
docker__docker-py
|
docker/types/services.py
|
{
"start": 29780,
"end": 30493
}
|
class ____(dict):
"""
Specification for DNS related configurations in resolver configuration
file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.
Args:
nameservers (:py:class:`list`): The IP addresses of the name
servers.
search (:py:class:`list`): A search list for host-name lookup.
options (:py:class:`list`): A list of internal resolver variables
to be modified (e.g., ``debug``, ``ndots:3``, etc.).
"""
def __init__(self, nameservers=None, search=None, options=None):
self['Nameservers'] = nameservers
self['Search'] = search
self['Options'] = options
|
DNSConfig
|
python
|
walkccc__LeetCode
|
solutions/2379. Minimum Recolors to Get K Consecutive Black Blocks/2379.py
|
{
"start": 0,
"end": 318
}
|
class ____:
def minimumRecolors(self, blocks: str, k: int) -> int:
countB = 0
maxCountB = 0
for i, block in enumerate(blocks):
if block == 'B':
countB += 1
if i >= k and blocks[i - k] == 'B':
countB -= 1
maxCountB = max(maxCountB, countB)
return k - maxCountB
|
Solution
|
python
|
spack__spack
|
lib/spack/spack/test/repo.py
|
{
"start": 18269,
"end": 19136
}
|
class ____(PackageBase):
pass
"""
)
with spack.repo.use_repositories(str(repo_dir)) as repo:
assert len(repo.all_package_names()) == 0
stderr = capsys.readouterr().err
assert "cannot be used because `zlib-ng` is not a valid Spack package module name" in stderr
assert "cannot be used because `UPPERCASE` is not a valid Spack package module name" in stderr
def test_repo_v2_module_and_class_to_package_name(tmp_path: pathlib.Path, capsys):
# Create a repo with a v2 structure
root, _ = spack.repo.create_repo(str(tmp_path), namespace="repo_2", package_api=(2, 0))
repo_dir = pathlib.Path(root)
# Create an invalid module name
(repo_dir / "packages" / "_1example_2_test").mkdir()
(repo_dir / "packages" / "_1example_2_test" / "package.py").write_text(
"""
from spack.package import PackageBase
|
Uppercase
|
python
|
pallets__werkzeug
|
src/werkzeug/sansio/request.py
|
{
"start": 1268,
"end": 19832
}
|
class ____:
"""Represents the non-IO parts of a HTTP request, including the
method, URL info, and headers.
This class is not meant for general use. It should only be used when
implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.
:param method: The method the request was made with, such as
``GET``.
:param scheme: The URL scheme of the protocol the request used, such
as ``https`` or ``wss``.
:param server: The address of the server. ``(host, port)``,
``(path, None)`` for unix sockets, or ``None`` if not known.
:param root_path: The prefix that the application is mounted under.
This is prepended to generated URLs, but is not part of route
matching.
:param path: The path part of the URL after ``root_path``.
:param query_string: The part of the URL after the "?".
:param headers: The headers received with the request.
:param remote_addr: The address of the client sending the request.
.. versionchanged:: 3.0
The ``charset``, ``url_charset``, and ``encoding_errors`` attributes
were removed.
.. versionadded:: 2.0
"""
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. A :class:`~werkzeug.datastructures.ImmutableDict`
#: is faster but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class: type[MultiDict[str, t.Any]] = ImmutableMultiDict
#: The type to be used for dict values from the incoming WSGI
#: environment. (For example for :attr:`cookies`.) By default an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
#:
#: .. versionchanged:: 1.0.0
#: Changed to ``ImmutableMultiDict`` to support multiple values.
#:
#: .. versionadded:: 0.6
dict_storage_class: type[MultiDict[str, t.Any]] = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class: type[list[t.Any]] = ImmutableList
user_agent_class: type[UserAgent] = UserAgent
"""The class used and returned by the :attr:`user_agent` property to
parse the header. Defaults to
:class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An
extension can provide a subclass that uses a parser to provide other
data.
.. versionadded:: 2.0
"""
#: Valid host names when handling requests. By default all hosts are
#: trusted, which means that whatever the client says the host is
#: will be accepted.
#:
#: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to
#: any value by a malicious client, it is recommended to either set
#: this property or implement similar validation in the proxy (if
#: the application is being run behind one).
#:
#: .. versionadded:: 0.9
trusted_hosts: list[str] | None = None
def __init__(
self,
method: str,
scheme: str,
server: tuple[str, int | None] | None,
root_path: str,
path: str,
query_string: bytes,
headers: Headers,
remote_addr: str | None,
) -> None:
#: The method the request was made with, such as ``GET``.
self.method = method.upper()
#: The URL scheme of the protocol the request used, such as
#: ``https`` or ``wss``.
self.scheme = scheme
#: The address of the server. ``(host, port)``, ``(path, None)``
#: for unix sockets, or ``None`` if not known.
self.server = server
#: The prefix that the application is mounted under, without a
#: trailing slash. :attr:`path` comes after this.
self.root_path = root_path.rstrip("/")
#: The path part of the URL after :attr:`root_path`. This is the
#: path used for routing within the application.
self.path = "/" + path.lstrip("/")
#: The part of the URL after the "?". This is the raw value, use
#: :attr:`args` for the parsed values.
self.query_string = query_string
#: The headers received with the request.
self.headers = headers
#: The address of the client sending the request.
self.remote_addr = remote_addr
def __repr__(self) -> str:
try:
url = self.url
except Exception as e:
url = f"(invalid URL: {e})"
return f"<{type(self).__name__} {url!r} [{self.method}]>"
@cached_property
def args(self) -> MultiDict[str, str]:
"""The parsed URL parameters (the part in the URL after the question
mark).
By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
.. versionchanged:: 2.3
Invalid bytes remain percent encoded.
"""
return self.parameter_storage_class(
parse_qsl(
self.query_string.decode(),
keep_blank_values=True,
errors="werkzeug.url_quote",
)
)
@cached_property
def access_route(self) -> list[str]:
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if "X-Forwarded-For" in self.headers:
return self.list_storage_class(
parse_list_header(self.headers["X-Forwarded-For"])
)
elif self.remote_addr is not None:
return self.list_storage_class([self.remote_addr])
return self.list_storage_class()
@cached_property
def full_path(self) -> str:
"""Requested path, including the query string."""
return f"{self.path}?{self.query_string.decode()}"
@property
def is_secure(self) -> bool:
"""``True`` if the request was made with a secure protocol
(HTTPS or WSS).
"""
return self.scheme in {"https", "wss"}
@cached_property
def url(self) -> str:
"""The full request URL with the scheme, host, root path, path,
and query string."""
return get_current_url(
self.scheme, self.host, self.root_path, self.path, self.query_string
)
@cached_property
def base_url(self) -> str:
"""Like :attr:`url` but without the query string."""
return get_current_url(self.scheme, self.host, self.root_path, self.path)
@cached_property
def root_url(self) -> str:
"""The request URL scheme, host, and root path. This is the root
that the application is accessed from.
"""
return get_current_url(self.scheme, self.host, self.root_path)
@cached_property
def host_url(self) -> str:
"""The request URL scheme and host only."""
return get_current_url(self.scheme, self.host)
@cached_property
def host(self) -> str:
"""The host name the request was made to, including the port if
it's non-standard. Validated with :attr:`trusted_hosts`.
"""
return get_host(
self.scheme, self.headers.get("host"), self.server, self.trusted_hosts
)
@cached_property
def cookies(self) -> ImmutableMultiDict[str, str]:
"""A :class:`dict` with the contents of all cookies transmitted with
the request."""
wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie"))
return parse_cookie( # type: ignore
wsgi_combined_cookie, cls=self.dict_storage_class
)
# Common Descriptors
content_type = header_property[str](
"Content-Type",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
read_only=True,
)
@cached_property
def content_length(self) -> int | None:
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(
http_content_length=self.headers.get("Content-Length"),
http_transfer_encoding=self.headers.get("Transfer-Encoding"),
)
content_encoding = header_property[str](
"Content-Encoding",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.
.. versionadded:: 0.9""",
read_only=True,
)
content_md5 = header_property[str](
"Content-MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)
.. versionadded:: 0.9""",
read_only=True,
)
referrer = header_property[str](
"Referer",
doc="""The Referer[sic] request-header field allows the client
to specify, for the server's benefit, the address (URI) of the
resource from which the Request-URI was obtained (the
"referrer", although the header field is misspelled).""",
read_only=True,
)
date = header_property(
"Date",
None,
parse_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
""",
read_only=True,
)
max_forwards = header_property(
"Max-Forwards",
None,
int,
doc="""The Max-Forwards request-header field provides a
mechanism with the TRACE and OPTIONS methods to limit the number
of proxies or gateways that can forward the request to the next
inbound server.""",
read_only=True,
)
def _parse_content_type(self) -> None:
if not hasattr(self, "_parsed_content_type"):
self._parsed_content_type = parse_options_header(
self.headers.get("Content-Type", "")
)
@property
def mimetype(self) -> str:
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self) -> dict[str, str]:
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self) -> HeaderSet:
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.headers.get("Pragma", ""))
# Accept
@cached_property
def accept_mimetypes(self) -> MIMEAccept:
"""List of mimetypes this client supports as
:class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return parse_accept_header(self.headers.get("Accept"), MIMEAccept)
@cached_property
def accept_charsets(self) -> CharsetAccept:
"""List of charsets this client supports as
:class:`~werkzeug.datastructures.CharsetAccept` object.
"""
return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept)
@cached_property
def accept_encodings(self) -> Accept:
"""List of encodings this client accepts. Encodings in a HTTP term
are compression encodings such as gzip. For charsets have a look at
:attr:`accept_charset`.
"""
return parse_accept_header(self.headers.get("Accept-Encoding"))
@cached_property
def accept_languages(self) -> LanguageAccept:
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
.. versionchanged 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept)
# ETag
@cached_property
def cache_control(self) -> RequestCacheControl:
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.headers.get("Cache-Control")
return parse_cache_control_header(cache_control, None, RequestCacheControl)
@cached_property
def if_match(self) -> ETags:
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.headers.get("If-Match"))
@cached_property
def if_none_match(self) -> ETags:
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.headers.get("If-None-Match"))
@cached_property
def if_modified_since(self) -> datetime | None:
"""The parsed `If-Modified-Since` header as a datetime object.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
"""
return parse_date(self.headers.get("If-Modified-Since"))
@cached_property
def if_unmodified_since(self) -> datetime | None:
"""The parsed `If-Unmodified-Since` header as a datetime object.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
"""
return parse_date(self.headers.get("If-Unmodified-Since"))
@cached_property
def if_range(self) -> IfRange:
"""The parsed ``If-Range`` header.
.. versionchanged:: 2.0
``IfRange.date`` is timezone-aware.
.. versionadded:: 0.7
"""
return parse_if_range_header(self.headers.get("If-Range"))
@cached_property
def range(self) -> Range | None:
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.headers.get("Range"))
# User Agent
@cached_property
def user_agent(self) -> UserAgent:
"""The user agent. Use ``user_agent.string`` to get the header
value. Set :attr:`user_agent_class` to a subclass of
:class:`~werkzeug.user_agent.UserAgent` to provide parsing for
the other properties or other extended data.
.. versionchanged:: 2.1
The built-in parser was removed. Set ``user_agent_class`` to a ``UserAgent``
subclass to parse data from the string.
"""
return self.user_agent_class(self.headers.get("User-Agent", ""))
# Authorization
@cached_property
def authorization(self) -> Authorization | None:
"""The ``Authorization`` header parsed into an :class:`.Authorization` object.
``None`` if the header is not present.
.. versionchanged:: 2.3
:class:`Authorization` is no longer a ``dict``. The ``token`` attribute
was added for auth schemes that use a token instead of parameters.
"""
return Authorization.from_header(self.headers.get("Authorization"))
# CORS
origin = header_property[str](
"Origin",
doc=(
"The host that the request originated from. Set"
" :attr:`~CORSResponseMixin.access_control_allow_origin` on"
" the response to indicate which origins are allowed."
),
read_only=True,
)
access_control_request_headers = header_property(
"Access-Control-Request-Headers",
load_func=parse_set_header,
doc=(
"Sent with a preflight request to indicate which headers"
" will be sent with the cross origin request. Set"
" :attr:`~CORSResponseMixin.access_control_allow_headers`"
" on the response to indicate which headers are allowed."
),
read_only=True,
)
access_control_request_method = header_property[str](
"Access-Control-Request-Method",
doc=(
"Sent with a preflight request to indicate which method"
" will be used for the cross origin request. Set"
" :attr:`~CORSResponseMixin.access_control_allow_methods`"
" on the response to indicate which methods are allowed."
),
read_only=True,
)
@property
def is_json(self) -> bool:
"""Check if the mimetype indicates JSON data, either
:mimetype:`application/json` or :mimetype:`application/*+json`.
"""
mt = self.mimetype
return (
mt == "application/json"
or mt.startswith("application/")
and mt.endswith("+json")
)
|
Request
|
python
|
numba__numba
|
numba/core/utils.py
|
{
"start": 6822,
"end": 9629
}
|
class ____(object):
OPTIONS = {}
def __init__(self):
self._values = self.OPTIONS.copy()
def set(self, name, value=True):
if name not in self.OPTIONS:
raise NameError("Invalid flag: %s" % name)
self._values[name] = value
def unset(self, name):
self.set(name, False)
def _check_attr(self, name):
if name not in self.OPTIONS:
raise AttributeError("Invalid flag: %s" % name)
def __getattr__(self, name):
self._check_attr(name)
return self._values[name]
def __setattr__(self, name, value):
if name.startswith('_'):
super(ConfigOptions, self).__setattr__(name, value)
else:
self._check_attr(name)
self._values[name] = value
def __repr__(self):
return "Flags(%s)" % ', '.join('%s=%s' % (k, v)
for k, v in self._values.items()
if v is not False)
def copy(self):
copy = type(self)()
copy._values = self._values.copy()
return copy
def __eq__(self, other):
return (isinstance(other, ConfigOptions) and
other._values == self._values)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(tuple(sorted(self._values.items())))
def order_by_target_specificity(target, templates, fnkey=''):
"""This orders the given templates from most to least specific against the
current "target". "fnkey" is an indicative typing key for use in the
exception message in the case that there's no usable templates for the
current "target".
"""
# No templates... return early!
if templates == []:
return []
from numba.core.target_extension import target_registry
# fish out templates that are specific to the target if a target is
# specified
DEFAULT_TARGET = 'generic'
usable = []
for ix, temp_cls in enumerate(templates):
# ? Need to do something about this next line
md = getattr(temp_cls, "metadata", {})
hw = md.get('target', DEFAULT_TARGET)
if hw is not None:
hw_clazz = target_registry[hw]
if target.inherits_from(hw_clazz):
usable.append((temp_cls, hw_clazz, ix))
# sort templates based on target specificity
def key(x):
return target.__mro__.index(x[1])
order = [x[0] for x in sorted(usable, key=key)]
if not order:
msg = (f"Function resolution cannot find any matches for function "
f"'{fnkey}' for the current target: '{target}'.")
from numba.core.errors import UnsupportedError
raise UnsupportedError(msg)
return order
T = _tp.TypeVar('T')
|
ConfigOptions
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_color.py
|
{
"start": 8290,
"end": 9064
}
|
class ____(_scale_color_continuous):
"""
Create a n color gradient
See Also
--------
plotnine.scale_color_gradient
plotnine.scale_color_gradientn
mizani.palettes.gradient_n_pal : The palette class that generates
the colour gradient.
"""
colors: InitVar[Sequence[str]]
"""
List of colors
"""
values: InitVar[Sequence[float] | None] = None
"""
list of points in the range [0, 1] at which to place each color.
Must be the same size as `colors`. Default to evenly space the colors
"""
def __post_init__(self, colors, values):
from mizani.palettes import gradient_n_pal
super().__post_init__()
self.palette = gradient_n_pal(colors, values)
@dataclass
|
scale_color_gradientn
|
python
|
django-import-export__django-import-export
|
tests/core/tests/test_tmp_storages.py
|
{
"start": 376,
"end": 810
}
|
class ____(TestCase):
def setUp(self):
self.storage = BaseStorage()
def test_save(self):
with self.assertRaises(NotImplementedError):
self.storage.save(None)
def test_read(self):
with self.assertRaises(NotImplementedError):
self.storage.read()
def test_remove(self):
with self.assertRaises(NotImplementedError):
self.storage.remove()
|
TestBaseStorage
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/hexdump.py
|
{
"start": 394,
"end": 3507
}
|
class HexdumpLexer(RegexLexer):
    """
    For typical hex dump output formats by the UNIX and GNU/Linux tools ``hexdump``,
    ``hd``, ``hexcat``, ``od`` and ``xxd``, and the DOS tool ``DEBUG``. For example:

    .. sourcecode:: hexdump

        00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............|
        00000010 02 00 3e 00 01 00 00 00 c5 48 40 00 00 00 00 00 |..>......H@.....|

    The specific supported formats are the outputs of:

    * ``hexdump FILE``
    * ``hexdump -C FILE`` -- the `canonical` format used in the example.
    * ``hd FILE`` -- same as ``hexdump -C FILE``.
    * ``hexcat FILE``
    * ``od -t x1z FILE``
    * ``xxd FILE``
    * ``DEBUG.EXE FILE.COM`` and entering ``d`` to the prompt.

    .. versionadded:: 2.1
    """
    name = 'Hexdump'
    aliases = ['hexdump']

    # One hex digit (upper or lower case).
    hd = r'[0-9A-Ha-h]'

    tokens = {
        'root': [
            (r'\n', Text),
            include('offset'),
            # xxd-style byte pair joined with '-'
            (r'('+hd+r'{2})(\-)('+hd+r'{2})',
             bygroups(Number.Hex, Punctuation, Number.Hex)),
            (hd+r'{2}', Number.Hex),
            # A full 16-char ASCII column selects a sticky sub-state so the
            # delimiter style ('<...>', '|...|', bare) is locked in per dump.
            (r'(\s{2,3})(\>)(.{16})(\<)$',
             bygroups(Text, Punctuation, String, Punctuation), 'bracket-strings'),
            (r'(\s{2,3})(\|)(.{16})(\|)$',
             bygroups(Text, Punctuation, String, Punctuation), 'piped-strings'),
            (r'(\s{2,3})(\>)(.{1,15})(\<)$',
             bygroups(Text, Punctuation, String, Punctuation)),
            (r'(\s{2,3})(\|)(.{1,15})(\|)$',
             bygroups(Text, Punctuation, String, Punctuation)),
            (r'(\s{2,3})(.{1,15})$', bygroups(Text, String)),
            (r'(\s{2,3})(.{16}|.{20})$', bygroups(Text, String), 'nonpiped-strings'),
            (r'\s', Text),
            (r'^\*', Punctuation),
        ],
        'offset': [
            (r'^('+hd+'+)(:)', bygroups(Name.Label, Punctuation), 'offset-mode'),
            (r'^'+hd+'+', Name.Label),
        ],
        'offset-mode': [
            (r'\s', Text, '#pop'),
            (hd+'+', Name.Label),
            (r':', Punctuation)
        ],
        'piped-strings': [
            (r'\n', Text),
            include('offset'),
            (hd+r'{2}', Number.Hex),
            (r'(\s{2,3})(\|)(.{1,16})(\|)$',
             bygroups(Text, Punctuation, String, Punctuation)),
            (r'\s', Text),
            (r'^\*', Punctuation),
        ],
        'bracket-strings': [
            (r'\n', Text),
            include('offset'),
            (hd+r'{2}', Number.Hex),
            (r'(\s{2,3})(\>)(.{1,16})(\<)$',
             bygroups(Text, Punctuation, String, Punctuation)),
            (r'\s', Text),
            (r'^\*', Punctuation),
        ],
        'nonpiped-strings': [
            (r'\n', Text),
            include('offset'),
            (r'('+hd+r'{2})(\-)('+hd+r'{2})',
             bygroups(Number.Hex, Punctuation, Number.Hex)),
            (hd+r'{2}', Number.Hex),
            (r'(\s{19,})(.{1,20}?)$', bygroups(Text, String)),
            (r'(\s{2,3})(.{1,20})$', bygroups(Text, String)),
            (r'\s', Text),
            (r'^\*', Punctuation),
        ],
    }
|
HexdumpLexer
|
python
|
numba__llvmlite
|
llvmlite/binding/value.py
|
{
"start": 997,
"end": 1685
}
|
class ValueKind(enum.IntEnum):
    """Mirror of the LLVMValueKind enum from llvm-c/Core.h.

    The numeric values must stay in sync with the LLVM C API; do not
    reorder or renumber members.
    """
    argument = 0
    basic_block = 1
    memory_use = 2
    memory_def = 3
    memory_phi = 4
    function = 5
    global_alias = 6
    global_ifunc = 7
    global_variable = 8
    block_address = 9
    constant_expr = 10
    constant_array = 11
    constant_struct = 12
    constant_vector = 13
    undef_value = 14
    constant_aggregate_zero = 15
    constant_data_array = 16
    constant_data_vector = 17
    constant_int = 18
    constant_fp = 19
    constant_pointer_null = 20
    constant_token_none = 21
    metadata_as_value = 22
    inline_asm = 23
    instruction = 24
    poison_value = 25
|
ValueKind
|
python
|
pypa__pipenv
|
pipenv/vendor/colorama/winterm.py
|
{
"start": 621,
"end": 7134
}
|
class WinTerm(object):
    """Emulates ANSI terminal behaviour through the Win32 console API.

    Tracks foreground colour, background colour and style as separate fields
    so each can be changed independently, then recombines them into a single
    Win32 attribute word via get_attrs().
    """

    def __init__(self):
        # Snapshot the console's startup attributes so reset_all can restore them.
        self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
        self.set_attrs(self._default)
        self._default_fore = self._fore
        self._default_back = self._back
        self._default_style = self._style
        # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
        # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
        # we track them separately, since LIGHT_EX is overwritten by Fore/Back
        # and BRIGHT is overwritten by Style codes.
        self._light = 0

    def get_attrs(self):
        # Pack fore (low 3 bits), back (bits 4-6) and style/light into one word.
        return self._fore + self._back * 16 + (self._style | self._light)

    def set_attrs(self, value):
        # Unpack a Win32 attribute word into the separately-tracked fields.
        self._fore = value & 7
        self._back = (value >> 4) & 7
        self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)

    def reset_all(self, on_stderr=None):
        self.set_attrs(self._default)
        self.set_console(attrs=self._default)
        self._light = 0

    def fore(self, fore=None, light=False, on_stderr=False):
        if fore is None:
            fore = self._default_fore
        self._fore = fore
        # Emulate LIGHT_EX with BRIGHT Style
        if light:
            self._light |= WinStyle.BRIGHT
        else:
            self._light &= ~WinStyle.BRIGHT
        self.set_console(on_stderr=on_stderr)

    def back(self, back=None, light=False, on_stderr=False):
        if back is None:
            back = self._default_back
        self._back = back
        # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
        if light:
            self._light |= WinStyle.BRIGHT_BACKGROUND
        else:
            self._light &= ~WinStyle.BRIGHT_BACKGROUND
        self.set_console(on_stderr=on_stderr)

    def style(self, style=None, on_stderr=False):
        if style is None:
            style = self._default_style
        self._style = style
        self.set_console(on_stderr=on_stderr)

    def set_console(self, attrs=None, on_stderr=False):
        """Push the current (or given) attribute word to the console."""
        if attrs is None:
            attrs = self.get_attrs()
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleTextAttribute(handle, attrs)

    def get_position(self, handle):
        position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
        # Because Windows coordinates are 0-based,
        # and win32.SetConsoleCursorPosition expects 1-based.
        position.X += 1
        position.Y += 1
        return position

    def set_cursor_position(self, position=None, on_stderr=False):
        if position is None:
            # I'm not currently tracking the position, so there is no default.
            # position = self.get_position()
            return
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleCursorPosition(handle, position)

    def cursor_adjust(self, x, y, on_stderr=False):
        """Move the cursor relative to its current position."""
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        position = self.get_position(handle)
        adjusted_position = (position.Y + y, position.X + x)
        win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)

    def erase_screen(self, mode=0, on_stderr=False):
        # 0 should clear from the cursor to the end of the screen.
        # 1 should clear from the cursor to the beginning of the screen.
        # 2 should clear the entire screen, and move cursor to (1,1)
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        # get the number of character cells in the current buffer
        cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
        # get number of character cells before current cursor position
        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = cells_in_screen - cells_before_cursor
        elif mode == 1:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_before_cursor
        elif mode == 2:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_in_screen
        else:
            # invalid mode
            return
        # fill the entire screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
        if mode == 2:
            # put the cursor where needed
            win32.SetConsoleCursorPosition(handle, (1, 1))

    def erase_line(self, mode=0, on_stderr=False):
        # 0 should clear from the cursor to the end of the line.
        # 1 should clear from the cursor to the beginning of the line.
        # 2 should clear the entire line.
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
        elif mode == 1:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwCursorPosition.X
        elif mode == 2:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwSize.X
        else:
            # invalid mode
            return
        # fill the entire screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)

    def set_title(self, title):
        win32.SetConsoleTitle(title)
def enable_vt_processing(fd):
    """Try to enable ANSI/VT escape processing on the console backing *fd*.

    Returns True when the VT flag is confirmed enabled; otherwise returns a
    falsy value (False on error, or — on the fall-through path where the flag
    did not stick — an implicit None).
    """
    if win32.windll is None or not win32.winapi_test():
        return False
    try:
        handle = get_osfhandle(fd)
        mode = win32.GetConsoleMode(handle)
        win32.SetConsoleMode(
            handle,
            mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING,
        )
        # Re-read the mode: SetConsoleMode can succeed without the flag
        # actually taking effect, so verify before reporting success.
        mode = win32.GetConsoleMode(handle)
        if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING:
            return True
    # Can get TypeError in testsuite where 'fd' is a Mock()
    except (OSError, TypeError):
        return False
|
WinTerm
|
python
|
ray-project__ray
|
python/ray/tune/tests/test_tune_restore_warm_start.py
|
{
"start": 822,
"end": 4931
}
|
class AbstractWarmStartTest:
    """Shared harness for search-algorithm warm-start/restore tests.

    Subclasses implement set_basic_conf() to supply a (search_alg, cost)
    pair. The tests check that 5 trials run from scratch plus 5 trials run
    after restoring the search state produce the same trial configs as 10
    trials run in one go.
    """

    def setUp(self):
        ray.init(num_cpus=1)
        self.tmpdir = tempfile.mkdtemp()
        self.experiment_name = "results"

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        ray.shutdown()
        _register_all()

    def set_basic_conf(self):
        # Subclasses must return (search_alg, cost_fn).
        raise NotImplementedError()

    def get_scheduler(self):
        return None

    def treat_trial_config(self, trial_config):
        # Hook for subclasses to normalize configs before comparison.
        return trial_config

    def run_part_from_scratch(self):
        # Seed so the scratch run and the full run see identical randomness.
        np.random.seed(162)
        search_alg, cost = self.set_basic_conf()
        if not isinstance(search_alg, ConcurrencyLimiter):
            search_alg = ConcurrencyLimiter(search_alg, 1)
        results_exp_1 = tune.run(
            cost,
            num_samples=5,
            search_alg=search_alg,
            scheduler=self.get_scheduler(),
            verbose=0,
            name=self.experiment_name,
            storage_path=self.tmpdir,
            reuse_actors=True,
        )
        checkpoint_path = os.path.join(self.tmpdir, "warmStartTest.pkl")
        search_alg.save(checkpoint_path)
        return results_exp_1, np.random.get_state(), checkpoint_path

    def run_from_experiment_restore(self, random_state):
        search_alg, cost = self.set_basic_conf()
        if not isinstance(search_alg, ConcurrencyLimiter):
            search_alg = ConcurrencyLimiter(search_alg, 1)
        search_alg.restore_from_dir(os.path.join(self.tmpdir, self.experiment_name))
        results = tune.run(
            cost,
            num_samples=5,
            search_alg=search_alg,
            scheduler=self.get_scheduler(),
            verbose=0,
            name=self.experiment_name,
            storage_path=self.tmpdir,
            reuse_actors=True,
        )
        return results

    def run_explicit_restore(self, random_state, checkpoint_path):
        # Resume the RNG where the scratch run left off, then restore the
        # search algorithm from its checkpoint file.
        np.random.set_state(random_state)
        search_alg2, cost = self.set_basic_conf()
        if not isinstance(search_alg2, ConcurrencyLimiter):
            search_alg2 = ConcurrencyLimiter(search_alg2, 1)
        search_alg2.restore(checkpoint_path)
        return tune.run(
            cost,
            num_samples=5,
            search_alg=search_alg2,
            scheduler=self.get_scheduler(),
            verbose=0,
            reuse_actors=True,
        )

    def run_full(self):
        # Reference run: same seed, all 10 samples in one experiment.
        np.random.seed(162)
        search_alg3, cost = self.set_basic_conf()
        if not isinstance(search_alg3, ConcurrencyLimiter):
            search_alg3 = ConcurrencyLimiter(search_alg3, 1)
        return tune.run(
            cost,
            num_samples=10,
            search_alg=search_alg3,
            scheduler=self.get_scheduler(),
            verbose=0,
            reuse_actors=True,
        )

    def testWarmStart(self):
        results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
        results_exp_2 = self.run_explicit_restore(r_state, checkpoint_path)
        results_exp_3 = self.run_full()
        trials_1_config = self.treat_trial_config(
            [trial.config for trial in results_exp_1.trials]
        )
        trials_2_config = self.treat_trial_config(
            [trial.config for trial in results_exp_2.trials]
        )
        trials_3_config = self.treat_trial_config(
            [trial.config for trial in results_exp_3.trials]
        )
        self.assertEqual(trials_1_config + trials_2_config, trials_3_config)

    def testRestore(self):
        results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
        results_exp_2 = self.run_from_experiment_restore(r_state)
        results_exp_3 = self.run_full()
        trials_1_config = self.treat_trial_config(
            [trial.config for trial in results_exp_1.trials]
        )
        trials_2_config = self.treat_trial_config(
            [trial.config for trial in results_exp_2.trials]
        )
        trials_3_config = self.treat_trial_config(
            [trial.config for trial in results_exp_3.trials]
        )
        self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
|
AbstractWarmStartTest
|
python
|
MorvanZhou__Reinforcement-learning-with-tensorflow
|
contents/10_A3C/A3C_RNN.py
|
{
"start": 5384,
"end": 9429
}
|
class Worker(object):
    """A3C worker: owns one gym env and one local ACNet synced with the global net.

    NOTE: the ``__main__`` block below instantiates this class by the name
    ``Worker``, so the class name is part of the script's contract.
    """

    def __init__(self, name, globalAC):
        self.env = gym.make(GAME).unwrapped
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            rnn_state = SESS.run(self.AC.init_state)    # zero rnn state at beginning
            keep_state = rnn_state.copy()       # keep rnn state for updating global net
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':
                    self.env.render()
                a, rnn_state_ = self.AC.choose_action(s, rnn_state)  # get the action and next rnn state
                s_, r, done, info = self.env.step(a)
                done = True if ep_t == MAX_EP_STEP - 1 else False
                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append((r+8)/8)    # normalize
                if total_step % UPDATE_GLOBAL_ITER == 0 or done:   # update global and assign to local net
                    if done:
                        v_s_ = 0   # terminal
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :], self.AC.init_state: rnn_state_})[0, 0]
                    # Bootstrap discounted returns backwards through the buffer.
                    buffer_v_target = []
                    for r in buffer_r[::-1]:    # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()
                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                        self.AC.init_state: keep_state,
                    }
                    self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()
                    keep_state = rnn_state_.copy()  # replace the keep_state as the new initial rnn state_
                s = s_
                rnn_state = rnn_state_  # renew rnn state
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                    )
                    GLOBAL_EP += 1
                    break
if __name__ == "__main__":
    SESS = tf.Session()
    with tf.device("/cpu:0"):
        # Shared optimizers and the parameter-only global network.
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create worker
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i   # worker name
            workers.append(Worker(i_name, GLOBAL_AC))
    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())
    if OUTPUT_GRAPH:
        # Start the graph log from a clean directory each run.
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, SESS.graph)
    worker_threads = []
    for worker in workers:
        # NOTE(review): the lambda closes over the loop variable late-binding
        # style; it works here only because each thread is started before the
        # next iteration rebinds `worker` — confirm before reordering.
        job = lambda: worker.work()
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)
    # Plot the smoothed episode-reward curve accumulated by the workers.
    plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()
|
Worker
|
python
|
pydata__xarray
|
asv_bench/benchmarks/combine.py
|
{
"start": 1681,
"end": 2833
}
|
class Combine3d:
    """Benchmark concatenating and merging large datasets"""

    def setup(self):
        """Create 4 datasets with two different variables"""
        t_size, x_size, y_size = 50, 450, 400
        t = np.arange(t_size)
        data = np.random.randn(t_size, x_size, y_size)

        # Two variables (A, B), each split into two halves along "T" so the
        # combine functions have both a concat and a merge dimension to work on.
        self.dsA0 = xr.Dataset(
            {"A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))}
        )
        self.dsA1 = xr.Dataset(
            {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))}
        )
        self.dsB0 = xr.Dataset(
            {"B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))}
        )
        self.dsB1 = xr.Dataset(
            {"B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))}
        )

    def time_combine_nested(self):
        datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]]
        xr.combine_nested(datasets, concat_dim=[None, "T"])

    def time_combine_by_coords(self):
        """Also has to load and arrange t coordinate"""
        datasets = [self.dsA0, self.dsA1, self.dsB0, self.dsB1]
        xr.combine_by_coords(datasets)
|
Combine3d
|
python
|
django__django
|
tests/generic_relations_regress/models.py
|
{
"start": 4078,
"end": 4192
}
|
class C(models.Model):
    """Minimal model with a cascading FK to B, used in generic-relations tests."""

    b = models.ForeignKey(B, models.CASCADE)

    class Meta:
        # Deterministic ordering so querysets compare reliably in tests.
        ordering = ("id",)
|
C
|
python
|
huggingface__transformers
|
src/transformers/models/owlvit/modeling_owlvit.py
|
{
"start": 12491,
"end": 16074
}
|
class OwlViTVisionEmbeddings(nn.Module):
    """Patch + position embeddings for the OWL-ViT vision tower.

    Images are split into non-overlapping patches via a strided Conv2d, a
    learned [CLS] embedding is prepended, and learned position embeddings are
    added (optionally bicubically interpolated for non-default resolutions).
    """

    def __init__(self, config: OwlViTVisionConfig):
        super().__init__()
        self.patch_size = config.patch_size
        self.config = config
        self.embed_dim = config.hidden_size
        self.class_embedding = nn.Parameter(torch.randn(config.hidden_size))

        # Patchify: stride == kernel == patch_size, so patches do not overlap.
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=config.patch_size,
            stride=config.patch_size,
            bias=False,
        )

        self.num_patches = (config.image_size // config.patch_size) ** 2
        self.num_positions = self.num_patches + 1  # +1 for the [CLS] token
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.interpolate_pos_encoding
    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        # Split off the [CLS] position; only patch positions get resampled.
        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [batch_size, num_channels, height, width]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings
|
OwlViTVisionEmbeddings
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 328625,
"end": 335799
}
|
class ErrorBandDef(CompositeMarkDef):
    """
    ErrorBandDef schema wrapper.

    Parameters
    ----------
    type : :class:`ErrorBand`, Literal['errorband']
        The mark type. This could a primitive mark type (one of ``"bar"``, ``"circle"``,
        ``"square"``, ``"tick"``, ``"line"``, ``"area"``, ``"point"``, ``"geoshape"``,
        ``"rule"``, and ``"text"``) or a composite mark type (``"boxplot"``,
        ``"errorband"``, ``"errorbar"``).
    band : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`

    borders : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`

    clip : bool
        Whether a composite mark be clipped to the enclosing group's width and height.
    color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
        Default color.

        **Default value:** ``"#4682b4"``

        **Note:**

        * This property cannot be used in a `style config
          <https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
        * The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
          will override ``color``.
    extent : :class:`ErrorBarExtent`, Literal['ci', 'iqr', 'stderr', 'stdev']
        The extent of the band. Available options include:

        * ``"ci"``: Extend the band to the 95% bootstrapped confidence interval of the mean.
        * ``"stderr"``: The size of band are set to the value of standard error, extending
          from the mean.
        * ``"stdev"``: The size of band are set to the value of standard deviation,
          extending from the mean.
        * ``"iqr"``: Extend the band to the q1 and q3.

        **Default value:** ``"stderr"``.
    interpolate : :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
        The line interpolation method for the error band. One of the following:

        * ``"linear"``: piecewise linear segments, as in a polyline.
        * ``"linear-closed"``: close the linear segments to form a polygon.
        * ``"step"``: a piecewise constant function (a step function) consisting of
          alternating horizontal and vertical lines. The y-value changes at the midpoint of
          each pair of adjacent x-values.
        * ``"step-before"``: a piecewise constant function (a step function) consisting of
          alternating horizontal and vertical lines. The y-value changes before the x-value.
        * ``"step-after"``: a piecewise constant function (a step function) consisting of
          alternating horizontal and vertical lines. The y-value changes after the x-value.
        * ``"basis"``: a B-spline, with control point duplication on the ends.
        * ``"basis-open"``: an open B-spline; may not intersect the start or end.
        * ``"basis-closed"``: a closed B-spline, as in a loop.
        * ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
        * ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
          but will intersect other control points.
        * ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
        * ``"bundle"``: equivalent to basis, except the tension parameter is used to
          straighten the spline.
        * ``"monotone"``: cubic interpolation that preserves monotonicity in y.
    opacity : float
        The opacity (value between [0,1]) of the mark.
    orient : :class:`Orientation`, Literal['horizontal', 'vertical']
        Orientation of the error band. This is normally automatically determined, but can be
        specified when the orientation is ambiguous and cannot be automatically determined.
    tension : float
        The tension parameter for the interpolation type of the error band.
    """

    _schema = {"$ref": "#/definitions/ErrorBandDef"}

    def __init__(
        self,
        type: Optional[SchemaBase | ErrorBand_T] = Undefined,
        band: Optional[bool | SchemaBase | Map] = Undefined,
        borders: Optional[bool | SchemaBase | Map] = Undefined,
        clip: Optional[bool] = Undefined,
        color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
        extent: Optional[SchemaBase | ErrorBarExtent_T] = Undefined,
        interpolate: Optional[SchemaBase | Interpolate_T] = Undefined,
        opacity: Optional[float] = Undefined,
        orient: Optional[SchemaBase | Orientation_T] = Undefined,
        tension: Optional[float] = Undefined,
        **kwds,
    ):
        # Pure pass-through: validation against _schema happens in the base class.
        super().__init__(
            type=type,
            band=band,
            borders=borders,
            clip=clip,
            color=color,
            extent=extent,
            interpolate=interpolate,
            opacity=opacity,
            orient=orient,
            tension=tension,
            **kwds,
        )
|
ErrorBandDef
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/nodes.py
|
{
"start": 8054,
"end": 8445
}
|
class Join(
    mixins.NodeDetailMixin,
    mixins.NodeExecuteMixin,
    mixins.NodeCancelMixin,
    mixins.NodeUndoMixin,
    mixins.NodeReviveMixin,
    nodes.Join,
):
    """Workflow Join node wired to the default task views.

    The mixins expose detail/execute/cancel/undo/revive endpoints; the class
    attributes below select which view class serves each endpoint.
    """

    index_view_class = views.IndexTaskView
    detail_view_class = views.DetailTaskView
    cancel_view_class = views.CancelTaskView
    undo_view_class = views.UndoTaskView
    revive_view_class = views.ReviveTaskView
|
Join
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classVar2.py
|
{
"start": 174,
"end": 282
}
|
class Proto(t.Protocol):
    # ClassVar members of a Protocol: implementors must provide these at
    # class level, not per instance.
    var1: t.ClassVar[str]
    var2: t.ClassVar[str]
    var3: _ClassVar = ["hi"]
|
Proto
|
python
|
mozilla__bleach
|
bleach/_vendor/html5lib/_utils.py
|
{
"start": 1333,
"end": 2358
}
|
class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"):"baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        _dictEntries = []
        for name, value in items:
            if isinstance(name, (list, tuple, frozenset, set)):
                # Fan a collection key out into one entry per member.
                for item in name:
                    _dictEntries.append((item, value))
            else:
                _dictEntries.append((name, value))
        dict.__init__(self, _dictEntries)
        # Collection keys must not collide, or entries would be silently lost.
        assert len(self) == len(_dictEntries)
        self.default = None

    def __getitem__(self, key):
        # Missing keys fall back to the configurable default instead of raising.
        return dict.get(self, key, self.default)

    def __get__(self, instance, owner=None):
        # Accessed as a class attribute, bind the dispatcher to the instance.
        return BoundMethodDispatcher(instance, self)
|
MethodDispatcher
|
python
|
getsentry__sentry
|
src/social_auth/exceptions.py
|
{
"start": 47,
"end": 138
}
|
class SocialAuthBaseException(ValueError):
    """Base class for pipeline exceptions."""
|
SocialAuthBaseException
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py
|
{
"start": 3860,
"end": 4204
}
|
class GraphenePartitionStatus(graphene.ObjectType):
    """GraphQL type describing the latest run status of a single partition."""

    id = graphene.NonNull(graphene.String)
    partitionName = graphene.NonNull(graphene.String)
    # Nullable: a partition may not have been materialized by any run yet.
    runId = graphene.Field(graphene.String)
    runStatus = graphene.Field(GrapheneRunStatus)
    runDuration = graphene.Field(graphene.Float)

    class Meta:
        name = "PartitionStatus"
|
GraphenePartitionStatus
|
python
|
MongoEngine__mongoengine
|
mongoengine/base/document.py
|
{
"start": 832,
"end": 46593
}
|
class ____:
# TODO simplify how `_changed_fields` is used.
# Currently, handling of `_changed_fields` seems unnecessarily convoluted:
# 1. `BaseDocument` defines `_changed_fields` in its `__slots__`, yet it's
# not setting it to `[]` (or any other value) in `__init__`.
# 2. `EmbeddedDocument` sets `_changed_fields` to `[]` it its overloaded
# `__init__`.
# 3. `Document` does NOT set `_changed_fields` upon initialization. The
# field is primarily set via `_from_son` or `_clear_changed_fields`,
# though there are also other methods that manipulate it.
# 4. The codebase is littered with `hasattr` calls for `_changed_fields`.
__slots__ = (
"_changed_fields",
"_initialised",
"_created",
"_data",
"_dynamic_fields",
"_auto_id_field",
"_db_field_map",
"__weakref__",
)
_dynamic = False
_dynamic_lock = True
STRICT = False
def __init__(self, *args, **values):
"""
Initialise a document or an embedded document.
:param values: A dictionary of keys and values for the document.
It may contain additional reserved keywords, e.g. "__auto_convert".
:param __auto_convert: If True, supplied values will be converted
to Python-type values via each field's `to_python` method.
:param _created: Indicates whether this is a brand new document
or whether it's already been persisted before. Defaults to true.
"""
self._initialised = False
self._created = True
if args:
raise TypeError(
"Instantiating a document with positional arguments is not "
"supported. Please use `field_name=value` keyword arguments."
)
__auto_convert = values.pop("__auto_convert", True)
_created = values.pop("_created", True)
signals.pre_init.send(self.__class__, document=self, values=values)
# Check if there are undefined fields supplied to the constructor,
# if so raise an Exception.
if not self._dynamic and (self._meta.get("strict", True) or _created):
_undefined_fields = set(values.keys()) - set(
list(self._fields.keys()) + ["id", "pk", "_cls", "_text_score"]
)
if _undefined_fields:
msg = f'The fields "{_undefined_fields}" do not exist on the document "{self._class_name}"'
raise FieldDoesNotExist(msg)
if self.STRICT and not self._dynamic:
self._data = StrictDict.create(allowed_keys=self._fields_ordered)()
else:
self._data = {}
self._dynamic_fields = SON()
# Assign default values for fields
# not set in the constructor
for field_name in self._fields:
if field_name in values:
continue
value = getattr(self, field_name, None)
setattr(self, field_name, value)
if "_cls" not in values:
self._cls = self._class_name
# Set actual values
dynamic_data = {}
FileField = _import_class("FileField")
for key, value in values.items():
field = self._fields.get(key)
if field or key in ("id", "pk", "_cls"):
if __auto_convert and value is not None:
if field and not isinstance(field, FileField):
value = field.to_python(value)
setattr(self, key, value)
else:
if self._dynamic:
dynamic_data[key] = value
else:
# For strict Document
self._data[key] = value
# Set any get_<field>_display methods
self.__set_field_display()
if self._dynamic:
self._dynamic_lock = False
for key, value in dynamic_data.items():
setattr(self, key, value)
# Flag initialised
self._initialised = True
self._created = _created
signals.post_init.send(self.__class__, document=self)
def __delattr__(self, *args, **kwargs):
"""Handle deletions of fields"""
field_name = args[0]
if field_name in self._fields:
default = self._fields[field_name].default
if callable(default):
default = default()
setattr(self, field_name, default)
else:
super().__delattr__(*args, **kwargs)
def __setattr__(self, name, value):
    """Assign *value* to *name*, routing through the document machinery.

    Beyond plain attribute assignment this:
    - registers previously unseen public names as dynamic fields on
      dynamic documents (once the dynamic lock is released),
    - stores dynamic-field values in ``_data`` and marks them changed,
    - forbids mutating shard-key fields on already-persisted documents,
    - flips ``_created`` to False when the id field is set on an
      initialised, user-created document (typically during .save()).
    """
    # Handle dynamic data only if an initialised dynamic document
    if self._dynamic and not self._dynamic_lock:
        if name not in self._fields_ordered and not name.startswith("_"):
            # First assignment of a new public attribute: register it as
            # a DynamicField so it round-trips to the database.
            DynamicField = _import_class("DynamicField")
            field = DynamicField(db_field=name, null=True)
            field.name = name
            self._dynamic_fields[name] = field
            self._fields_ordered += (name,)
        if not name.startswith("_"):
            # Wrap nested containers (BaseList/BaseDict) for change tracking.
            value = self.__expand_dynamic_values(name, value)
        # Handle marking data as changed
        if name in self._dynamic_fields:
            self._data[name] = value
            if hasattr(self, "_changed_fields"):
                self._mark_as_changed(name)
    try:
        # NOTE: `self__created` is a deliberately-named local (not an
        # attribute access); it caches self._created which may not exist
        # yet while the instance is still being constructed.
        self__created = self._created
    except AttributeError:
        self__created = True
    if (
        self._is_document
        and not self__created
        and name in self._meta.get("shard_key", tuple())
        and self._data.get(name) != value
    ):
        # Shard keys must never change once the document is persisted.
        msg = "Shard Keys are immutable. Tried to update %s" % name
        raise OperationError(msg)
    try:
        self__initialised = self._initialised
    except AttributeError:
        self__initialised = False
    # Check if the user has created a new instance of a class
    if (
        self._is_document
        and self__initialised
        and self__created
        and name == self._meta.get("id_field")
    ):
        # When setting the ID field of an instance already instantiated and that was user-created (i.e not saved in db yet)
        # Typically this is when calling .save()
        super().__setattr__("_created", False)
    super().__setattr__(name, value)
def __getstate__(self):
    """Build the picklable state dict for this document.

    Captures change-tracking / lifecycle flags when present, plus the
    document data serialized via :meth:`to_mongo`.
    """
    tracked = (
        "_changed_fields",
        "_initialised",
        "_created",
        "_dynamic_fields",
        "_fields_ordered",
    )
    data = {attr: getattr(self, attr) for attr in tracked if hasattr(self, attr)}
    data["_data"] = self.to_mongo()
    return data
def __setstate__(self, data):
    """Restore pickled state produced by ``__getstate__``.

    Attributes are restored via setattr in a fixed order so that the
    change-tracking / lifecycle flags are in place before ``_data`` is
    assigned (``__setattr__`` consults them).
    """
    if isinstance(data["_data"], SON):
        # Raw SON from the driver: run it through _from_son so field
        # values are converted back to python types first.
        data["_data"] = self.__class__._from_son(data["_data"])._data
    for k in (
        "_changed_fields",
        "_initialised",
        "_created",
        "_data",
        "_dynamic_fields",
    ):
        if k in data:
            setattr(self, k, data[k])
    if "_fields_ordered" in data:
        if self._dynamic:
            self._fields_ordered = data["_fields_ordered"]
        else:
            # Non-dynamic documents always use the class-level ordering;
            # the pickled ordering is ignored.
            _super_fields_ordered = type(self)._fields_ordered
            self._fields_ordered = _super_fields_ordered
    dynamic_fields = data.get("_dynamic_fields") or SON()
    for k in dynamic_fields.keys():
        # Re-assign dynamic values through __setattr__ so they are
        # re-wrapped (BaseList/BaseDict) and tracked again.
        setattr(self, k, data["_data"].get(k))
def __iter__(self):
    """Iterate over the document's field names, in declaration order."""
    yield from self._fields_ordered
def __getitem__(self, name):
    """Dictionary-style field access: return a field's value if present.

    Raises ``KeyError`` for unknown fields or failed attribute lookup.
    """
    if name in self._fields_ordered:
        try:
            return getattr(self, name)
        except AttributeError:
            # Fall through to KeyError, matching dict semantics.
            pass
    raise KeyError(name)
def __setitem__(self, name, value):
    """Dictionary-style field access: set a field's value.

    Non-dynamic documents only accept names of declared fields.
    """
    known = self._dynamic or name in self._fields
    if not known:
        raise KeyError(name)
    return setattr(self, name, value)
def __contains__(self, name):
    """True when *name* resolves to a non-None attribute value."""
    # getattr with a default swallows AttributeError, so a missing
    # attribute behaves like a None value (i.e. "not contained").
    return getattr(self, name, None) is not None
def __len__(self):
    """Number of entries currently stored in the document's data dict."""
    return len(self._data)
def __repr__(self):
    """Debug representation, tolerant of broken unicode in __str__."""
    try:
        shown = self.__str__()
    except (UnicodeEncodeError, UnicodeDecodeError):
        shown = "[Bad Unicode data]"
    # Build the repr with the same string type __str__ produced
    # (plain str when __str__ returned None).
    builder = str if shown is None else type(shown)
    return builder(f"<{self.__class__.__name__}: {shown}>")
def __str__(self):
    """Prefer a user-defined __unicode__ hook; otherwise a generic label."""
    if hasattr(self, "__unicode__"):
        return self.__unicode__()
    return "%s object" % self.__class__.__name__
def __eq__(self, other):
    """Equality by primary key (or DBRef); identity for unsaved docs."""
    # Same class with a non-None id: compare primary keys.
    if isinstance(other, self.__class__) and getattr(other, "id", None) is not None:
        return self.id == other.id
    # Compare against a raw DBRef by collection + id.
    if isinstance(other, DBRef):
        same_collection = self._get_collection_name() == other.collection
        return same_collection and self.id == other.id
    # Unsaved documents are only equal to themselves.
    return self is other if self.id is None else False
def __ne__(self, other):
    """Negation of __eq__."""
    return not (self == other)
def clean(self):
    """Hook for document-level data cleaning before validation runs.

    Subclasses override this to perform validation or assignment that
    spans multiple fields.  Any ValidationError raised here is not tied
    to a particular field; it is reported under the special
    NON_FIELD_ERRORS key.  The base implementation does nothing.
    """
def get_text_score(self):
    """Return the text-search score attached to this document.

    Only available on documents produced by a text query that requested
    the score; otherwise raises ``InvalidDocumentError``.
    """
    if "_text_score" in self._data:
        return self._data["_text_score"]
    raise InvalidDocumentError(
        "This document is not originally built from a text query (or text_score was not set on search_text() call)"
    )
def to_mongo(self, use_db_field=True, fields=None):
    """
    Return as SON data ready for use with MongoDB.

    :param use_db_field: when True, keys use each field's db_field name;
        otherwise the attribute name is used.
    :param fields: optional list of (possibly dotted) field paths; when
        given, only those root fields are serialized and sub-paths are
        forwarded to embedded fields that accept a ``fields`` argument.
    """
    fields = fields or []
    data = SON()
    # _id is placed first so it leads the SON ordering; it is filled in
    # below when the id field is serialized.
    data["_id"] = None
    data["_cls"] = self._class_name
    # only root fields ['test1.a', 'test2'] => ['test1', 'test2']
    root_fields = {f.split(".")[0] for f in fields}
    for field_name in self:
        if root_fields and field_name not in root_fields:
            continue
        value = self._data.get(field_name, None)
        field = self._fields.get(field_name)
        if field is None and self._dynamic:
            field = self._dynamic_fields.get(field_name)
        if value is not None:
            # Introspect the field's to_mongo signature to decide which
            # optional keyword arguments it supports.
            f_inputs = field.to_mongo.__code__.co_varnames
            ex_vars = {}
            if fields and "fields" in f_inputs:
                # Forward only the sub-paths under this field, with the
                # "<field_name>." prefix stripped.
                key = "%s." % field_name
                embedded_fields = [
                    i.replace(key, "") for i in fields if i.startswith(key)
                ]
                ex_vars["fields"] = embedded_fields
            if "use_db_field" in f_inputs:
                ex_vars["use_db_field"] = use_db_field
            value = field.to_mongo(value, **ex_vars)
        # Handle self generating fields
        if value is None and field._auto_gen:
            value = field.generate()
            self._data[field_name] = value
        # None values are skipped unless the field explicitly allows null.
        if value is not None or field.null:
            if use_db_field:
                data[field.db_field] = value
            else:
                data[field.name] = value
    # Only add _cls if allow_inheritance is True
    if not self._meta.get("allow_inheritance"):
        data.pop("_cls")
    return data
def validate(self, clean=True):
    """Ensure that all fields' values are valid and that required fields
    are present.

    :param clean: when True, run :meth:`clean` first; any error it
        raises is recorded under NON_FIELD_ERRORS.

    Raises :class:`ValidationError` if any of the fields' values are found
    to be invalid.  Per-field errors are aggregated and raised together.
    """
    # Ensure that each field is matched to a valid value
    errors = {}
    if clean:
        try:
            self.clean()
        except ValidationError as error:
            errors[NON_FIELD_ERRORS] = error
    # Get a list of tuples of field names and their current values
    fields = [
        (
            self._fields.get(name, self._dynamic_fields.get(name)),
            self._data.get(name),
        )
        for name in self._fields_ordered
    ]
    EmbeddedDocumentField = _import_class("EmbeddedDocumentField")
    GenericEmbeddedDocumentField = _import_class("GenericEmbeddedDocumentField")
    for field, value in fields:
        if value is not None:
            try:
                # Embedded document fields take the `clean` flag so the
                # nested document's clean() hook runs too.
                if isinstance(
                    field, (EmbeddedDocumentField, GenericEmbeddedDocumentField)
                ):
                    field._validate(value, clean=clean)
                else:
                    field._validate(value)
            except ValidationError as error:
                errors[field.name] = error.errors or error
            except (ValueError, AttributeError, AssertionError) as error:
                errors[field.name] = error
        elif field.required and not getattr(field, "_auto_gen", False):
            # Missing value on a required, non-auto-generated field.
            errors[field.name] = ValidationError(
                "Field is required", field_name=field.name
            )
    if errors:
        # Identify the offending document by pk (falling back to the
        # owning document's pk for embedded documents).
        pk = "None"
        if hasattr(self, "pk"):
            pk = self.pk
        elif self._instance and hasattr(self._instance, "pk"):
            pk = self._instance.pk
        message = f"ValidationError ({self._class_name}:{pk}) "
        raise ValidationError(message, errors=errors)
def to_json(self, *args, **kwargs):
    """Convert this document to JSON.

    :param use_db_field: Serialize field names as they appear in
        MongoDB (as opposed to attribute names on this document).
        Defaults to True.

    Remaining args/kwargs are forwarded to ``json_util.dumps``; when no
    ``json_options`` is supplied, LEGACY_JSON_OPTIONS is used and a
    DeprecationWarning is emitted.
    """
    use_db_field = kwargs.pop("use_db_field", True)
    if "json_options" not in kwargs:
        warnings.warn(
            "No 'json_options' are specified! Falling back to "
            "LEGACY_JSON_OPTIONS with uuid_representation=PYTHON_LEGACY. "
            "For use with other MongoDB drivers specify the UUID "
            "representation to use. This will be changed to "
            "uuid_representation=UNSPECIFIED in a future release.",
            DeprecationWarning,
            stacklevel=2,
        )
        kwargs["json_options"] = LEGACY_JSON_OPTIONS
    son = self.to_mongo(use_db_field)
    return json_util.dumps(son, *args, **kwargs)
@classmethod
def from_json(cls, json_data, created=False, **kwargs):
    """Converts json data to a Document instance.

    :param str json_data: The json data to load into the Document.
    :param bool created: Boolean defining whether to consider the newly
        instantiated document as brand new or as persisted already:
        * If True, consider the document as brand new, no matter what data
          it's loaded with (i.e., even if an ID is loaded).
        * If False and an ID is NOT provided, consider the document as
          brand new.
        * If False and an ID is provided, assume that the object has
          already been persisted (this has an impact on the subsequent
          call to .save()).
        * Defaults to ``False``.
    """
    # TODO should `created` default to False? If the object already exists
    # in the DB, you would likely retrieve it from MongoDB itself through
    # a query, not load it from JSON data.
    if "json_options" not in kwargs:
        warnings.warn(
            "No 'json_options' are specified! Falling back to "
            "LEGACY_JSON_OPTIONS with uuid_representation=PYTHON_LEGACY. "
            "For use with other MongoDB drivers specify the UUID "
            "representation to use. This will be changed to "
            "uuid_representation=UNSPECIFIED in a future release.",
            DeprecationWarning,
            stacklevel=2,
        )
        kwargs["json_options"] = LEGACY_JSON_OPTIONS
    son = json_util.loads(json_data, **kwargs)
    return cls._from_son(son, created=created)
def __expand_dynamic_values(self, name, value):
    """Expand any dynamic values to their correct types / values.

    Scalars pass through unchanged; dicts carrying '_cls' are rebuilt as
    documents; other containers are expanded recursively and wrapped in
    change-tracking containers (BaseList/BaseDict).
    """
    if not isinstance(value, (dict, list, tuple)):
        return value
    # If the value is a dict with '_cls' in it, turn it into a document
    is_dict = isinstance(value, dict)
    if is_dict and "_cls" in value:
        cls = _DocumentRegistry.get(value["_cls"])
        return cls(**value)
    if is_dict:
        value = {k: self.__expand_dynamic_values(k, v) for k, v in value.items()}
    else:
        value = [self.__expand_dynamic_values(name, v) for v in value]
    # Convert lists / values so we can watch for any changes on them
    EmbeddedDocumentListField = _import_class("EmbeddedDocumentListField")
    if isinstance(value, (list, tuple)) and not isinstance(value, BaseList):
        # NOTE(review): this checks type(self) -- the *document* -- against
        # a field class, so the EmbeddedDocumentList branch looks
        # unreachable in practice; confirm intent before relying on it.
        if issubclass(type(self), EmbeddedDocumentListField):
            value = EmbeddedDocumentList(value, self, name)
        else:
            value = BaseList(value, self, name)
    elif isinstance(value, dict) and not isinstance(value, BaseDict):
        value = BaseDict(value, self, name)
    return value
def _mark_as_changed(self, key):
    """Mark a key as explicitly changed by the user.

    The key is translated to db-field form; it is only recorded if no
    ancestor path is already marked, and once recorded any descendant
    paths become redundant and are removed.
    """
    if not hasattr(self, "_changed_fields"):
        return
    if "." in key:
        # Translate only the root segment; nested segments keep their
        # attribute names.
        key, rest = key.split(".", 1)
        key = self._db_field_map.get(key, key)
        key = f"{key}.{rest}"
    else:
        key = self._db_field_map.get(key, key)
    if key not in self._changed_fields:
        levels, idx = key.split("."), 1
        # Walk prefixes of the path; the while/else only appends the key
        # when NO prefix is already marked changed (break skips the else).
        while idx <= len(levels):
            if ".".join(levels[:idx]) in self._changed_fields:
                break
            idx += 1
        else:
            self._changed_fields.append(key)
            # remove lower level changed fields
            level = ".".join(levels[:idx]) + "."
            remove = self._changed_fields.remove
            for field in self._changed_fields[:]:
                if field.startswith(level):
                    remove(field)
def _clear_changed_fields(self):
    """Using _get_changed_fields iterate and remove any fields that
    are marked as changed.

    Walks each dotted changed path down through lists/dicts/documents
    and resets the change tracking on whatever it lands on, then clears
    this document's own list.
    """
    ReferenceField = _import_class("ReferenceField")
    GenericReferenceField = _import_class("GenericReferenceField")
    for changed in self._get_changed_fields():
        parts = changed.split(".")
        data = self
        for part in parts:
            if isinstance(data, list):
                try:
                    data = data[int(part)]
                except IndexError:
                    data = None
            elif isinstance(data, dict):
                data = data.get(part, None)
            else:
                # Document-like: translate db field name back to the
                # attribute name before the getattr.
                field_name = data._reverse_db_field_map.get(part, part)
                data = getattr(data, field_name, None)
        if not isinstance(data, LazyReference) and hasattr(
            data, "_changed_fields"
        ):
            if getattr(data, "_is_document", False):
                # Don't reset referenced top-level documents.
                continue
            data._changed_fields = []
        elif isinstance(data, (list, tuple, dict)):
            if hasattr(data, "field") and isinstance(
                data.field, (ReferenceField, GenericReferenceField)
            ):
                # Containers of references are not followed.
                continue
            BaseDocument._nestable_types_clear_changed_fields(data)
    self._changed_fields = []
@staticmethod
def _nestable_types_clear_changed_fields(data):
    """Recursively clear change tracking inside nested containers.

    :param data: list/tuple/dict whose members may be embedded documents
        or further nested containers
    """
    Document = _import_class("Document")
    # Loop list / dict fields as they contain documents.
    # Only the values matter here, so iterate them directly.
    members = data.values() if hasattr(data, "items") else data
    for member in members:
        tracks_changes = hasattr(member, "_get_changed_fields")
        if tracks_changes and not isinstance(member, Document):
            # don't follow references (top-level Documents)
            member._clear_changed_fields()
        elif isinstance(member, (list, tuple, dict)):
            BaseDocument._nestable_types_clear_changed_fields(member)
@staticmethod
def _nestable_types_changed_fields(changed_fields, base_key, data):
    """Collect changed-field paths from nested container data.

    :param changed_fields: previously collected changed fields; extended
        in place
    :param base_key: dotted prefix to prepend to changes found here
    :param data: list/tuple/dict to inspect for changes
    """
    # Loop list / dict fields as they contain documents.
    if hasattr(data, "items"):
        pairs = data.items()
    else:
        pairs = enumerate(data)
    for index_or_key, value in pairs:
        item_key = f"{base_key}{index_or_key}."
        # don't check anything lower if this key is already marked
        # as changed.
        if item_key[:-1] in changed_fields:
            continue
        if hasattr(value, "_get_changed_fields"):
            nested = value._get_changed_fields()
            # += mutates the caller's list in place (intentional).
            changed_fields += [f"{item_key}{k}" for k in nested if k]
        elif isinstance(value, (list, tuple, dict)):
            BaseDocument._nestable_types_changed_fields(
                changed_fields, item_key, value
            )
def _get_changed_fields(self):
    """Return a list of all fields that have explicitly been changed.

    Starts from this document's own ``_changed_fields`` and recurses
    into embedded documents and containers, producing dotted db-field
    paths.  Reference fields are never followed.
    """
    EmbeddedDocument = _import_class("EmbeddedDocument")
    LazyReferenceField = _import_class("LazyReferenceField")
    ReferenceField = _import_class("ReferenceField")
    GenericLazyReferenceField = _import_class("GenericLazyReferenceField")
    GenericReferenceField = _import_class("GenericReferenceField")
    SortedListField = _import_class("SortedListField")
    changed_fields = []
    changed_fields += getattr(self, "_changed_fields", [])
    for field_name in self._fields_ordered:
        db_field_name = self._db_field_map.get(field_name, field_name)
        key = "%s." % db_field_name
        data = self._data.get(field_name, None)
        field = self._fields.get(field_name)
        if db_field_name in changed_fields:
            # Whole field already marked as changed, no need to go further
            continue
        if isinstance(field, ReferenceField):  # Don't follow referenced documents
            continue
        if isinstance(data, EmbeddedDocument):
            # Find all embedded fields that have been changed
            changed = data._get_changed_fields()
            changed_fields += [f"{key}{k}" for k in changed if k]
        elif isinstance(data, (list, tuple, dict)):
            if hasattr(field, "field") and isinstance(
                field.field,
                (
                    LazyReferenceField,
                    ReferenceField,
                    GenericLazyReferenceField,
                    GenericReferenceField,
                ),
            ):
                # Containers of (lazy) references are opaque here.
                continue
            elif isinstance(field, SortedListField) and field._ordering:
                # if ordering is affected whole list is changed
                if any(field._ordering in d._changed_fields for d in data):
                    changed_fields.append(db_field_name)
                    continue
            self._nestable_types_changed_fields(changed_fields, key, data)
    return changed_fields
def _delta(self):
    """Returns the delta (set, unset) of the changes for a document.

    Gets any values that have been explicitly changed.

    Returns a ``(set_data, unset_data)`` pair suitable for building a
    MongoDB ``$set`` / ``$unset`` update: falsy changed values whose
    field default is also falsy are moved from set to unset.
    """
    # Handles cases where not loaded from_son but has _id
    doc = self.to_mongo()
    set_fields = self._get_changed_fields()
    unset_data = {}
    if hasattr(self, "_changed_fields"):
        set_data = {}
        # Fetch each set item from its path
        for path in set_fields:
            parts = path.split(".")
            d = doc
            new_path = []
            for p in parts:
                if isinstance(d, (ObjectId, DBRef)):
                    # Don't dig in the references
                    break
                elif isinstance(d, list) and p.isdigit():
                    # An item of a list (identified by its index) is updated
                    d = d[int(p)]
                elif hasattr(d, "get"):
                    # dict-like (dict, embedded document)
                    d = d.get(p)
                new_path.append(p)
            # new_path may be shorter than parts if we stopped at a
            # reference; the truncated path is what gets $set.
            path = ".".join(new_path)
            set_data[path] = d
    else:
        # No change tracking at all: treat the whole document as the set.
        set_data = doc
        if "_id" in set_data:
            del set_data["_id"]
    # Determine if any changed items were actually unset.
    for path, value in list(set_data.items()):
        if value or isinstance(
            value, (numbers.Number, bool)
        ):  # Account for 0 and True that are truthy
            continue
        parts = path.split(".")
        if self._dynamic and len(parts) and parts[0] in self._dynamic_fields:
            # Falsy dynamic fields are always unset.
            del set_data[path]
            unset_data[path] = 1
            continue
        # If we've set a value that ain't the default value don't unset it.
        default = None
        if path in self._fields:
            default = self._fields[path].default
        else:  # Perform a full lookup for lists / embedded lookups
            d = self
            db_field_name = parts.pop()
            for p in parts:
                if isinstance(d, list) and p.isdigit():
                    d = d[int(p)]
                elif hasattr(d, "__getattribute__") and not isinstance(d, dict):
                    # Document-like: translate the db field back to the
                    # attribute name before descending.
                    real_path = d._reverse_db_field_map.get(p, p)
                    d = getattr(d, real_path)
                else:
                    d = d.get(p)
            if hasattr(d, "_fields"):
                field_name = d._reverse_db_field_map.get(
                    db_field_name, db_field_name
                )
                if field_name in d._fields:
                    default = d._fields.get(field_name).default
                else:
                    default = None
        if default is not None:
            # Resolve callable defaults (factories) before comparing.
            default = default() if callable(default) else default
        if value != default:
            continue
        # Value equals the default: emit an unset instead of a set.
        del set_data[path]
        unset_data[path] = 1
    return set_data, unset_data
@classmethod
def _get_collection_name(cls):
    """Return the collection name for this class; None for abstract
    classes (which declare no collection in their meta)."""
    return cls._meta.get("collection")
@classmethod
def _from_son(cls, son, _auto_dereference=True, created=False):
    """Create an instance of a Document (subclass) from a PyMongo SON (dict).

    :param son: dict/SON of raw data (keys in db-field form)
    :param _auto_dereference: when False, fields are deep-copied so the
        auto-dereference flag can be flipped without touching the class
    :param created: forwarded to the constructor's ``_created`` flag
    """
    if son and not isinstance(son, dict):
        raise ValueError(
            "The source SON object needs to be of type 'dict' but a '%s' was found"
            % type(son)
        )
    # Get the class name from the document, falling back to the given
    # class if unavailable
    class_name = son.get("_cls", cls._class_name)
    # Convert SON to a data dict, making sure each key is a string and
    # corresponds to the right db field.
    # This is needed as _from_son is currently called both from BaseDocument.__init__
    # and from EmbeddedDocumentField.to_python
    data = {}
    for key, value in son.items():
        key = str(key)
        key = cls._db_field_map.get(key, key)
        data[key] = value
    # Return correct subclass for document type
    if class_name != cls._class_name:
        cls = _DocumentRegistry.get(class_name)
    errors_dict = {}
    fields = cls._fields
    if not _auto_dereference:
        # if auto_deref is turned off, we copy the fields so
        # we can mutate the auto_dereference of the fields
        fields = copy.deepcopy(fields)
    # Apply field-name / db-field conversion
    for field_name, field in fields.items():
        field.set_auto_dereferencing(
            _auto_dereference
        )  # align the field's auto-dereferencing with the document's
        if field.db_field in data:
            value = data[field.db_field]
            try:
                # Convert raw db values to python types; None passes through.
                data[field_name] = (
                    value if value is None else field.to_python(value)
                )
                if field_name != field.db_field:
                    del data[field.db_field]
            except (AttributeError, ValueError) as e:
                errors_dict[field_name] = e
    if errors_dict:
        errors = "\n".join([f"Field '{k}' - {v}" for k, v in errors_dict.items()])
        msg = "Invalid data to create a `{}` instance.\n{}".format(
            cls._class_name,
            errors,
        )
        raise InvalidDocumentError(msg)
    # In STRICT documents, remove any keys that aren't in cls._fields
    if cls.STRICT:
        data = {k: v for k, v in data.items() if k in cls._fields}
    obj = cls(__auto_convert=False, _created=created, **data)
    # A freshly loaded document has no pending changes.
    obj._changed_fields = []
    if not _auto_dereference:
        obj._fields = fields
    return obj
@classmethod
def _build_index_specs(cls, meta_indexes):
    """Generate and merge the full index specs for this class.

    Geo and unique_with indexes are merged into the meta-declared
    specs: an incoming index whose field list matches an existing spec
    updates that spec in place rather than being appended.
    """
    # Compute these first: _unique_with_indexes has side effects on the
    # field objects (required/sparse flags).
    geo_indices = cls._geo_indices()
    unique_indices = cls._unique_with_indexes()
    index_specs = [cls._build_index_spec(spec) for spec in meta_indexes]

    def merge_index_specs(index_specs, indices):
        """Helper method for merging index specs."""
        if not indices:
            return index_specs
        # Map index fields (as a hashable tuple) to their existing spec.
        by_fields = {tuple(spec["fields"]): spec for spec in index_specs}
        # Incoming indexes either update a matching existing spec or are
        # appended as new.
        for new_index in indices:
            existing = by_fields.get(tuple(new_index["fields"]))
            if existing is None:
                index_specs.append(new_index)
            else:
                existing.update(new_index)
        return index_specs

    # Merge geo indexes and unique_with indexes into the meta index specs.
    index_specs = merge_index_specs(index_specs, geo_indices)
    index_specs = merge_index_specs(index_specs, unique_indices)
    return index_specs
@classmethod
def _build_index_spec(cls, spec):
    """Build a PyMongo index spec from a MongoEngine index spec.

    Accepts a bare field name, a list/tuple of names, or a full dict
    spec; field names may carry a one-character direction prefix
    (+, -, $, #, (, ), *) and are translated to db-field paths.
    """
    if isinstance(spec, str):
        spec = {"fields": [spec]}
    elif isinstance(spec, (list, tuple)):
        spec = {"fields": list(spec)}
    elif isinstance(spec, dict):
        # Copy so the caller's meta spec is not mutated below.
        spec = dict(spec)
    index_list = []
    direction = None
    # Check to see if we need to include _cls
    allow_inheritance = cls._meta.get("allow_inheritance")
    include_cls = (
        allow_inheritance
        and not spec.get("sparse", False)
        and spec.get("cls", True)
        and "_cls" not in spec["fields"]
    )
    # 733: don't include cls if index_cls is False unless there is an explicit cls with the index
    include_cls = include_cls and (
        spec.get("cls", False) or cls._meta.get("index_cls", True)
    )
    if "cls" in spec:
        spec.pop("cls")
    for key in spec["fields"]:
        # If inherited spec continue
        if isinstance(key, (list, tuple)):
            continue
        # ASCENDING from +
        # DESCENDING from -
        # TEXT from $
        # HASHED from #
        # GEOSPHERE from (
        # GEOHAYSTACK from )
        # GEO2D from *
        direction = pymongo.ASCENDING
        if key.startswith("-"):
            direction = pymongo.DESCENDING
        elif key.startswith("$"):
            direction = pymongo.TEXT
        elif key.startswith("#"):
            direction = pymongo.HASHED
        elif key.startswith("("):
            direction = pymongo.GEOSPHERE
        elif key.startswith(")"):
            try:
                # GEOHAYSTACK was removed in newer pymongo releases.
                direction = pymongo.GEOHAYSTACK
            except AttributeError:
                raise NotImplementedError
        elif key.startswith("*"):
            direction = pymongo.GEO2D
        if key.startswith(("+", "-", "*", "$", "#", "(", ")")):
            # Strip the direction prefix to get the bare field path.
            key = key[1:]
        # Use real field name, do it manually because we need field
        # objects for the next part (list field checking)
        parts = key.split(".")
        if parts in (["pk"], ["id"], ["_id"]):
            key = "_id"
        else:
            fields = cls._lookup_field(parts)
            parts = []
            for field in fields:
                try:
                    if field != "_id":
                        field = field.db_field
                except AttributeError:
                    # Plain string segments (e.g. list indexes) pass through.
                    pass
                parts.append(field)
            key = ".".join(parts)
        index_list.append((key, direction))
    # Don't add cls to a geo index
    if (
        include_cls
        and direction not in (pymongo.GEO2D, pymongo.GEOSPHERE)
        and (GEOHAYSTACK is None or direction != GEOHAYSTACK)
    ):
        index_list.insert(0, ("_cls", 1))
    if index_list:
        spec["fields"] = index_list
    return spec
@classmethod
def _unique_with_indexes(cls, namespace=""):
    """Find unique indexes in the document schema and return them.

    :param namespace: dotted prefix applied to field names when
        recursing into embedded documents.

    Side effects: fields referenced by ``unique_with`` are marked
    required, and the owning field's sparse flag may be adjusted.
    """
    unique_indexes = []
    for field_name, field in cls._fields.items():
        sparse = field.sparse
        # Generate a list of indexes needed by uniqueness constraints
        if field.unique:
            unique_fields = [field.db_field]
            # Add any unique_with fields to the back of the index spec
            if field.unique_with:
                if isinstance(field.unique_with, str):
                    # Normalize a single name to a one-element list.
                    field.unique_with = [field.unique_with]
                # Convert unique_with field names to real field names
                unique_with = []
                for other_name in field.unique_with:
                    parts = other_name.split(".")
                    # Lookup real name
                    parts = cls._lookup_field(parts)
                    name_parts = [part.db_field for part in parts]
                    unique_with.append(".".join(name_parts))
                    # Unique field should be required
                    parts[-1].required = True
                    sparse = not sparse and parts[-1].name not in cls.__dict__
                unique_fields += unique_with
            # Add the new index to the list
            fields = [(f"{namespace}{f}", pymongo.ASCENDING) for f in unique_fields]
            index = {"fields": fields, "unique": True, "sparse": sparse}
            unique_indexes.append(index)
        if field.__class__.__name__ in {
            "EmbeddedDocumentListField",
            "ListField",
            "SortedListField",
        }:
            # For list fields, inspect the wrapped item field below.
            field = field.field
        # Grab any embedded document field unique indexes
        if (
            field.__class__.__name__ == "EmbeddedDocumentField"
            and field.document_type != cls
        ):
            field_namespace = "%s." % field_name
            doc_cls = field.document_type
            unique_indexes += doc_cls._unique_with_indexes(field_namespace)
    return unique_indexes
@classmethod
def _geo_indices(cls, inspected=None, parent_field=None):
    """Collect geo index specs declared on this class.

    Recurses into embedded documents; *inspected* guards against
    self-referencing document cycles, and *parent_field* carries the
    dotted db-field prefix while recursing.
    """
    inspected = inspected or []
    inspected.append(cls)
    candidate_names = (
        "EmbeddedDocumentField",
        "GeoPointField",
        "PointField",
        "LineStringField",
        "PolygonField",
    )
    candidate_types = tuple(_import_class(name) for name in candidate_names)
    indices = []
    for field in cls._fields.values():
        if not isinstance(field, candidate_types):
            continue
        if hasattr(field, "document_type"):
            # Embedded document: recurse unless already visited.
            embedded_cls = field.document_type
            if embedded_cls in inspected:
                continue
            if hasattr(embedded_cls, "_geo_indices"):
                indices += embedded_cls._geo_indices(
                    inspected, parent_field=field.db_field
                )
        elif field._geo_index:
            name = field.db_field
            if parent_field:
                name = f"{parent_field}.{name}"
            indices.append({"fields": [(name, field._geo_index)]})
    return indices
@classmethod
def _lookup_field(cls, parts):
    """Given the path to a given field, return a list containing
    the Field object associated with that field and all of its parent
    Field objects.
    Args:
        parts (str, list, or tuple) - path to the field. Should be a
        string for simple fields existing on this document or a list
        of strings for a field that exists deeper in embedded documents.
    Returns:
        A list of Field instances for fields that were found or
        strings for sub-fields that weren't.
    Example:
        >>> user._lookup_field('name')
        [<mongoengine.fields.StringField at 0x1119bff50>]
        >>> user._lookup_field('roles')
        [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>]
        >>> user._lookup_field(['roles', 'role'])
        [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,
        <mongoengine.fields.StringField at 0x1119ec050>]
        >>> user._lookup_field('doesnt_exist')
        raises LookUpError
        >>> user._lookup_field(['roles', 'doesnt_exist'])
        [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,
        'doesnt_exist']
    """
    # TODO this method is WAY too complicated. Simplify it.
    # TODO don't think returning a string for embedded non-existent fields is desired
    ListField = _import_class("ListField")
    DynamicField = _import_class("DynamicField")
    if not isinstance(parts, (list, tuple)):
        parts = [parts]
    fields = []
    field = None
    for field_name in parts:
        # Handle ListField indexing:
        if field_name.isdigit() and isinstance(field, ListField):
            # Numeric list indexes stay as plain strings in the result.
            fields.append(field_name)
            continue
        # Look up first field from the document
        if field is None:
            if field_name == "pk":
                # Deal with "primary key" alias
                field_name = cls._meta["id_field"]
            if field_name in cls._fields:
                field = cls._fields[field_name]
            elif cls._dynamic:
                # Dynamic documents accept any name as a DynamicField.
                field = DynamicField(db_field=field_name)
            elif cls._meta.get("allow_inheritance") or cls._meta.get(
                "abstract", False
            ):
                # 744: in case the field is defined in a subclass
                for subcls in cls.__subclasses__():
                    try:
                        field = subcls._lookup_field([field_name])[0]
                    except LookUpError:
                        continue
                    if field is not None:
                        break
                else:
                    # No subclass defined the field either.
                    raise LookUpError('Cannot resolve field "%s"' % field_name)
            else:
                raise LookUpError('Cannot resolve field "%s"' % field_name)
        else:
            ReferenceField = _import_class("ReferenceField")
            GenericReferenceField = _import_class("GenericReferenceField")
            # If previous field was a reference, throw an error (we
            # cannot look up fields that are on references).
            if isinstance(field, (ReferenceField, GenericReferenceField)):
                raise LookUpError(
                    "Cannot perform join in mongoDB: %s" % "__".join(parts)
                )
            # If the parent field has a "field" attribute which has a
            # lookup_member method, call it to find the field
            # corresponding to this iteration.
            if hasattr(getattr(field, "field", None), "lookup_member"):
                new_field = field.field.lookup_member(field_name)
            # If the parent field is a DynamicField or if it's part of
            # a DynamicDocument, mark current field as a DynamicField
            # with db_name equal to the field name.
            elif cls._dynamic and (
                isinstance(field, DynamicField)
                or getattr(getattr(field, "document_type", None), "_dynamic", None)
            ):
                new_field = DynamicField(db_field=field_name)
            # Else, try to use the parent field's lookup_member method
            # to find the subfield.
            elif hasattr(field, "lookup_member"):
                new_field = field.lookup_member(field_name)
            # Raise a LookUpError if all the other conditions failed.
            else:
                raise LookUpError(
                    "Cannot resolve subfield or operator {} "
                    "on the field {}".format(field_name, field.name)
                )
            # If current field still wasn't found and the parent field
            # is a ComplexBaseField, add the name current field name and
            # move on.
            if not new_field and isinstance(field, ComplexBaseField):
                fields.append(field_name)
                continue
            elif not new_field:
                raise LookUpError('Cannot resolve field "%s"' % field_name)
            field = new_field  # update field to the new field type
        fields.append(field)
    return fields
@classmethod
def _translate_field_name(cls, field, sep="."):
    """Translate a dotted attribute path into its database field path."""
    resolved = cls._lookup_field(field.split(sep))
    return ".".join(f.db_field for f in resolved)
def __set_field_display(self):
    """Attach a get_<field>_display helper for each field with choices."""
    for attr_name, field in self._fields.items():
        if not field.choices:
            continue
        setattr(
            self,
            "get_%s_display" % attr_name,
            partial(self.__get_field_display, field=field),
        )
def __get_field_display(self, field):
    """Resolve the human-readable display value for a choices field.

    Only (value, label) pair choices are mapped; flat choices return
    the raw value.  List fields join the mapped labels with the field's
    display separator.
    """
    value = getattr(self, field.name)
    pair_choices = field.choices and isinstance(field.choices[0], (list, tuple))
    if not pair_choices:
        return value
    if value is None:
        return None
    sep = getattr(field, "display_sep", " ")
    if field.__class__.__name__ in ("ListField", "SortedListField"):
        values = value
    else:
        values = [value]
    labels = dict(field.choices)
    return sep.join([str(labels.get(val, val)) for val in values or []])
|
BaseDocument
|
python
|
google__pytype
|
pytype/pytd/optimize.py
|
{
"start": 17405,
"end": 21153
}
|
class ____(visitors.Visitor):
  """Simplifies classes with only a __call__ function to just a method.
  This transforms
    class Foo:
      m: Bar
    class Bar:
      def __call__(self: Foo, ...)
  to
    class Foo:
      def m(self, ...)
  .
  """

  def __init__(self):
    super().__init__()
    # Top-level module being visited (set on first EnterTypeDeclUnit).
    self._module = None
    # How many times each class name appears as a type anywhere.
    self._total_count = collections.defaultdict(int)
    # How many of those appearances we converted into methods.
    self._processed_count = collections.defaultdict(int)

  def _MaybeLookup(self, t):
    # Resolve a type reference to its pytd.Class, when possible.
    if isinstance(t, pytd.NamedType):
      assert self._module is not None
      return self._module.Get(t.name)
    elif isinstance(t, pytd.ClassType):
      return t.cls
    else:
      return None

  def _HasSelf(self, sig):
    """True if a signature has a self parameter.
    This only checks for the name, since the type can be too many different
    things (type of the method, type of the base class, object, unknown etc.)
    and doesn't carry over to the simplified version, anyway.
    Arguments:
      sig: Function signature (instance of pytd.Signature)
    Returns:
      True if the signature has "self".
    """
    return sig.params and sig.params[0].name == "self"

  def _LookupIfSimpleCall(self, t):
    """Looks up the type if it has only one method, "__call__"."""
    if not isinstance(t, (pytd.NamedType, pytd.ClassType)):
      # We only do this for simple types.
      return None
    cls = self._MaybeLookup(t)
    if not cls or not isinstance(cls, pytd.Class):
      # This is not a class or it doesn't exist, so assume it's not a method.
      return None
    if [f.name for f in cls.methods] != ["__call__"]:
      return None
    (method,) = cls.methods
    # Every signature must take "self", otherwise it can't be a bound method.
    return cls if all(self._HasSelf(sig) for sig in method.signatures) else None

  def _CanDelete(self, cls):
    """Checks whether this class can be deleted.
    Returns whether all occurrences of this class as a type were due to
    constants we removed.
    Arguments:
      cls: A pytd.Class.
    Returns:
      True if we can delete this class.
    """
    if not self._processed_count[cls.name]:
      # Leave standalone classes alone. E.g. the pytd files in
      # stubs/builtins/ defines classes not used by anything else.
      return False
    return self._processed_count[cls.name] == self._total_count[cls.name]

  def EnterTypeDeclUnit(self, module):
    # Since modules are hierarchical, we enter TypeDeclUnits multiple times-
    # but we only want to record the top-level one.
    if not self._module:
      self._module = module

  def VisitTypeDeclUnit(self, unit):
    # Drop helper classes whose only use was as a converted constant type.
    return unit.Replace(
        classes=tuple(c for c in unit.classes if not self._CanDelete(c))
    )

  def VisitClassType(self, t):
    # Count every type reference so _CanDelete can compare totals.
    self._total_count[t.name] += 1
    return t

  def VisitNamedType(self, t):
    self._total_count[t.name] += 1
    return t

  def VisitClass(self, cls):
    """Visit a class, and change constants to methods where possible."""
    new_constants = []
    new_methods = list(cls.methods)
    # Rewrites the __call__ signatures' self type to point at `cls`.
    adjust_self = visitors.AdjustSelf(force=True)
    adjust_self.class_types.append(visitors.ClassAsType(cls))
    # We can't do this optimization on namedtuples, as converting fields to
    # methods would cause us to lose track of them.
    if pytd_visitors.IsNamedTuple(cls):
      return cls
    for const in cls.constants:
      c = self._LookupIfSimpleCall(const.type)
      if c:
        # Replace the constant with a method carrying __call__'s signatures.
        signatures = c.methods[0].signatures
        self._processed_count[c.name] += 1
        new_method = pytd.Function(const.name, signatures, c.methods[0].kind)
        new_methods.append(new_method.Visit(adjust_self))
      else:
        new_constants.append(const)  # keep
    return cls.Replace(
        constants=tuple(new_constants), methods=tuple(new_methods)
    )
|
PullInMethodClasses
|
python
|
huggingface__transformers
|
tests/models/data2vec/test_modeling_data2vec_audio.py
|
{
"start": 24282,
"end": 26914
}
|
class ____(unittest.TestCase):
    """Slow integration tests: CTC inference with the pretrained
    facebook/data2vec-audio-base-960h checkpoint.

    NOTE(review): these tests download the model, processor and dummy
    LibriSpeech dataset from the Hugging Face hub, so they need network access.
    """

    def _load_datasamples(self, num_samples):
        # Return the raw waveforms of the first `num_samples` clips (by id)
        # from the dummy LibriSpeech validation split.
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").filter(
            lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
        )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def _load_superb(self, task, num_samples):
        # Helper for SUPERB-based tests; returns the first `num_samples` rows.
        ds = load_dataset("anton-l/superb_dummy", task, split="test")
        return ds[:num_samples]

    def test_inference_ctc_normal(self):
        # Single-clip CTC decode must reproduce the reference transcription.
        model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
        model.to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", do_lower_case=True)
        input_speech = self._load_datasamples(1)

        input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device)

        with torch.no_grad():
            logits = model(input_values).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    def test_inference_ctc_batched(self):
        # Batched (padded) decode over four clips; checks padding handling.
        model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h").to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", do_lower_case=True)

        input_speech = self._load_datasamples(4)

        inputs = processor(input_speech, return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)

        with torch.no_grad():
            logits = model(input_values).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
            "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around"
            " him with thousands of spectators were trivialities not worth thinking about",
            "his instant of panic was followed by a small sharp blow high on his chest",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
|
Data2VecAudioModelIntegrationTest
|
python
|
Netflix__metaflow
|
test/parallel/pytorch_parallel_test_flow.py
|
{
"start": 83,
"end": 2332
}
|
class ____(FlowSpec):
    """
    Test flow to test @pytorch_parallel.
    """

    num_parallel = Parameter(
        "num_parallel", help="Number of nodes in cluster", default=3
    )

    @step
    def start(self):
        # Fan out into `num_parallel` parallel copies of parallel_step.
        self.next(self.parallel_step, num_parallel=self.num_parallel)

    @pytorch_parallel
    @step
    def parallel_step(self):
        """
        Run a simple torch parallel program where each node creates a 3 x 3 tensor
        with each entry equaling their rank + 1. Then, all reduce is called to sum the
        tensors up.
        """
        import torch
        import torch.distributed as dist

        # Run very simple parallel pytorch program
        dist.init_process_group(
            "gloo",
            rank=current.parallel.node_index,
            world_size=current.parallel.num_nodes,
        )
        # Each node creates a 3x3 matrix with values corresponding to their rank + 1
        my_tensor = torch.ones(3, 3) * (dist.get_rank() + 1)
        assert int(my_tensor[0, 0]) == current.parallel.node_index + 1
        # Then sum the tensors up
        print("Reducing tensor", my_tensor)
        dist.all_reduce(my_tensor, op=dist.ReduceOp.SUM)
        print("Result:", my_tensor)
        # Assert the values are as expected: every entry equals 1+2+...+N.
        for i in range(3):
            for j in range(3):
                assert int(my_tensor[i, j]) == sum(
                    range(1, current.parallel.num_nodes + 1)
                )
        dist.destroy_process_group()
        # Stash per-node results so the join step can validate them.
        self.node_index = current.parallel.node_index
        self.num_nodes = current.parallel.num_nodes
        self.reduced_tensor_value = int(my_tensor[0, 0])
        self.next(self.multinode_end)

    @step
    def multinode_end(self, inputs):
        """
        Check the validity of the parallel execution.
        """
        # Inputs arrive ordered by node index; verify count and reduced value.
        j = 0
        for input in inputs:
            assert input.node_index == j
            assert input.num_nodes == self.num_parallel
            assert input.reduced_tensor_value == sum(range(1, input.num_nodes + 1))
            j += 1
        assert j == self.num_parallel
        self.next(self.end)

    @step
    def end(self):
        # Terminal step; all validation happens in multinode_end.
        pass


if __name__ == "__main__":
    PytorchParallelTest()
|
PytorchParallelTest
|
python
|
coleifer__peewee
|
playhouse/sqlite_ext.py
|
{
"start": 1077,
"end": 1439
}
|
class ____(AutoField):
    """Auto-incrementing field mapped onto SQLite's implicit ``rowid`` column.

    The field must be declared on the model under the attribute name
    ``rowid``; binding it under any other name raises ``ValueError``.
    """
    auto_increment = True
    # SQLite addresses the implicit integer primary key as "rowid".
    column_name = name = required_name = 'rowid'

    def bind(self, model, name, *args):
        """Attach the field to `model`, enforcing the mandatory name."""
        if name != self.required_name:
            raise ValueError('%s must be named "%s".' %
                             (type(self), self.required_name))
        # Zero-argument super(): the original explicit form referenced the
        # class by name, which breaks if the class is renamed (and does not
        # match the declared class name here).
        super().bind(model, name, *args)
|
RowIDField
|
python
|
numpy__numpy
|
numpy/lib/tests/test_function_base.py
|
{
"start": 85625,
"end": 86127
}
|
class ____:
def test_simple(self):
a = [1, 2, 3]
b = [1, 2, np.inf]
c = [1, 2, np.nan]
np.asarray_chkfinite(a)
assert_raises(ValueError, np.asarray_chkfinite, b)
assert_raises(ValueError, np.asarray_chkfinite, c)
def test_dtype_order(self):
# Regression test for missing dtype and order arguments
a = [1, 2, 3]
a = np.asarray_chkfinite(a, order='F', dtype=np.float64)
assert_(a.dtype == np.float64)
|
TestCheckFinite
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/triggers/external_task.py
|
{
"start": 1377,
"end": 7588
}
|
class ____(BaseTrigger):
    """
    A trigger to monitor tasks, task group and dag execution in Apache Airflow.

    :param external_dag_id: The ID of the external dag.
    :param run_ids: A list of run ids for the external dag.
    :param execution_dates: A list of execution dates (pre-Airflow-3 naming).
    :param external_task_ids: A collection of external task IDs to wait for.
    :param external_task_group_id: The ID of the external task group to wait for.
    :param failed_states: States considered as failed for external tasks.
    :param skipped_states: States considered as skipped for external tasks.
    :param allowed_states: States considered as successful for external tasks.
    :param poke_interval: The interval (in seconds) for poking the external tasks.
    :param soft_fail: If True, the trigger will not fail the entire dag on external task failure.
    :param logical_dates: A list of logical dates for the external dag.
    """

    def __init__(
        self,
        external_dag_id: str,
        run_ids: list[str] | None = None,
        execution_dates: list[datetime] | None = None,
        logical_dates: list[datetime] | None = None,
        external_task_ids: typing.Collection[str] | None = None,
        external_task_group_id: str | None = None,
        failed_states: Collection[str] | None = None,
        skipped_states: Collection[str] | None = None,
        allowed_states: Collection[str] | None = None,
        poke_interval: float = 2.0,
        soft_fail: bool = False,
        **kwargs,
    ):
        self.external_dag_id = external_dag_id
        self.external_task_ids = external_task_ids
        self.external_task_group_id = external_task_group_id
        self.failed_states = failed_states
        self.skipped_states = skipped_states
        self.allowed_states = allowed_states
        self.run_ids = run_ids
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        # Airflow <3 identifies runs by execution_dates; Airflow 3 by
        # run_ids/logical_dates. Both are kept; serialize() picks one set.
        self.execution_dates = execution_dates
        self.logical_dates = logical_dates
        super().__init__(**kwargs)

    def serialize(self) -> tuple[str, dict[str, Any]]:
        """Serialize the trigger param and module path."""
        data: dict[str, typing.Any] = {
            "external_dag_id": self.external_dag_id,
            "external_task_ids": self.external_task_ids,
            "external_task_group_id": self.external_task_group_id,
            "failed_states": self.failed_states,
            "skipped_states": self.skipped_states,
            "allowed_states": self.allowed_states,
            "poke_interval": self.poke_interval,
            "soft_fail": self.soft_fail,
        }
        # Only persist the identifier style supported by the running version.
        if AIRFLOW_V_3_0_PLUS:
            data["run_ids"] = self.run_ids
            data["logical_dates"] = self.logical_dates
        else:
            data["execution_dates"] = self.execution_dates
        return "airflow.providers.standard.triggers.external_task.WorkflowTrigger", data

    async def run(self) -> typing.AsyncIterator[TriggerEvent]:
        """Check periodically tasks, task group or dag status."""
        if AIRFLOW_V_3_0_PLUS:
            get_count_func = self._get_count_af_3
            run_id_or_dates = (self.run_ids or self.logical_dates) or []
        else:
            get_count_func = self._get_count
            run_id_or_dates = self.execution_dates or []
        # Poll until every monitored run reaches a terminal state. Failure and
        # skip checks fire as soon as a single run matches; success requires
        # ALL runs to be in an allowed state.
        while True:
            if self.failed_states:
                failed_count = await get_count_func(self.failed_states)
                if failed_count > 0:
                    yield TriggerEvent({"status": "failed"})
                    return
            if self.skipped_states:
                skipped_count = await get_count_func(self.skipped_states)
                if skipped_count > 0:
                    yield TriggerEvent({"status": "skipped"})
                    return
            allowed_count = await get_count_func(self.allowed_states)
            if allowed_count == len(run_id_or_dates):
                yield TriggerEvent({"status": "success"})
                return
            self.log.info("Sleeping for %s seconds", self.poke_interval)
            await asyncio.sleep(self.poke_interval)

    async def _get_count_af_3(self, states: Collection[str] | None) -> int:
        # Airflow 3 path: counts come from the task-runner API rather than a
        # direct DB session.
        from airflow.providers.standard.utils.sensor_helper import _get_count_by_matched_states
        from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance

        if self.external_task_ids:
            count = await sync_to_async(RuntimeTaskInstance.get_ti_count)(
                dag_id=self.external_dag_id,
                task_ids=list(self.external_task_ids),
                logical_dates=self.logical_dates,
                run_ids=self.run_ids,
                states=list(states) if states else None,
            )
            # get_ti_count counts task instances across all monitored tasks;
            # normalize to "number of runs fully in `states`".
            return int(count / len(self.external_task_ids))
        if self.external_task_group_id:
            run_id_task_state_map = await sync_to_async(RuntimeTaskInstance.get_task_states)(
                dag_id=self.external_dag_id,
                task_group_id=self.external_task_group_id,
                logical_dates=self.logical_dates,
                run_ids=self.run_ids,
            )
            count = await sync_to_async(_get_count_by_matched_states)(
                run_id_task_state_map=run_id_task_state_map,
                states=states or [],
            )
            return count
        # Neither task ids nor a task group: count whole dag runs.
        count = await sync_to_async(RuntimeTaskInstance.get_dr_count)(
            dag_id=self.external_dag_id,
            logical_dates=self.logical_dates,
            run_ids=self.run_ids,
            states=list(states) if states else None,
        )
        return count

    @sync_to_async
    def _get_count(self, states: Collection[str] | None) -> int:
        """
        Get the count of records against dttm filter and states. Async wrapper for _get_count.

        :param states: task or dag states
        :return The count of records.
        """
        return _get_count(
            dttm_filter=self.run_ids if AIRFLOW_V_3_0_PLUS else self.execution_dates,
            external_task_ids=self.external_task_ids,
            external_task_group_id=self.external_task_group_id,
            external_dag_id=self.external_dag_id,
            states=states,
        )
|
WorkflowTrigger
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/models/test_mappers.py
|
{
"start": 3822,
"end": 4145
}
|
class ____:
    """Property-existence checks for ``LinearColorMapper``."""

    def test_basic(self) -> None:
        expected_props = [
            "palette",
            "domain",
            "low",
            "high",
            "low_color",
            "high_color",
            "nan_color",
        ]
        mapper = bmm.LinearColorMapper()
        check_properties_existence(mapper, expected_props)
|
Test_LinearColorMapper
|
python
|
PyCQA__pylint
|
tests/functional/u/used/used_before_assignment.py
|
{
"start": 3677,
"end": 4306
}
|
class ____: # pylint: disable=invalid-name, too-few-public-methods, undefined-variable
    '''Issue #8754, no crash from unexpected assignment between attribute and variable'''
    # NOTE(review): `T` and `attr` are intentionally undefined — this is a
    # pylint functional-test fixture exercising the checker, not runnable code.
    T.attr = attr


# Fixture: NOT_ALWAYS_DEFINED is only bound when outer() is truthy, so the
# print below must be flagged by the checker.
if outer():
    NOT_ALWAYS_DEFINED = True
print(NOT_ALWAYS_DEFINED) # [used-before-assignment]
def inner_if_continues_outer_if_has_no_other_statements():
    """False-positive guard: `order` is bound on every path that reaches print()."""
    for i in range(5):
        if isinstance(i, int):
            # Testing no assignment here, before the inner if
            if i % 2 == 0:
                order = None
            else:
                # Odd values skip print() entirely, so `order` need not be set.
                continue
        else:
            order = None
        print(order)
|
T
|
python
|
modin-project__modin
|
modin/core/dataframe/pandas/interchange/dataframe_protocol/exception.py
|
{
"start": 894,
"end": 1035
}
|
class ____(Exception):
"""Exception to be raised if there is no validity buffer for ``PandasProtocolColumn``."""
pass
|
NoValidityBuffer
|
python
|
pytorch__pytorch
|
test/test_file_check.py
|
{
"start": 140,
"end": 1369
}
|
class ____(TestCase):
    """Tests for the Python API surface of ``torch.testing.FileCheck``."""

    def test_not_run(self):
        # A FileCheck instance garbage-collected without .run() should warn on
        # stdout; run the snippet in a subprocess and FileCheck its output.
        # NOTE(review): run_process_no_exception is presumably inherited from
        # the TestCase base class — confirm.
        stdout, _ = self.run_process_no_exception(
            """\
from torch.testing import FileCheck
file_check = FileCheck().check("not run")
del file_check
""",
        )
        FileCheck().check("You have not run this instance of FileCheck!").check_next(
            "FileCheck checks:"
        ).check_next("\tCHECK: not run").run(stdout)

    def test_all_python_api(self):
        # Exercise every public check_* combinator once, both via the fluent
        # API and via an inline CHECK-annotation string.
        test_string = """
        check check_same
        check_next
        check_count
        check_dag
        check_source_highlighted
        ~~~~~~~~~~~~~~~~~~~~~~~~
        check_regex
        """
        FileCheck().check("check").check_not("check_not").check_same(
            "check_same"
        ).check_next("check_next").check_count("check_count", 1).check_dag(
            "check_dag"
        ).check_source_highlighted("check_source_highlighted").check_regex(
            r"check_.+"
        ).run(test_string)

        FileCheck().run(
            """
            # CHECK: check
            # CHECK-NOT: check_not
            # CHECK-SAME: check_same
            # CHECK-NEXT: check_next
            # CHECK-DAG: check_dag
            # CHECK-SOURCE-HIGHLIGHTED: check_source_highlighted
            # CHECK-REGEX: check_.+
            """,
            test_string,
        )


if __name__ == "__main__":
    run_tests()
|
TestFileCheck
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
|
{
"start": 18745,
"end": 18840
}
|
class ____(IterableExportEventsStreamAdjustableRange):
    # Export stream for Iterable "custom event" records; the payload is read
    # from the "customEvent" field of the export response.
    data_field = "customEvent"
|
CustomEvent
|
python
|
huggingface__transformers
|
src/transformers/models/xlm_roberta/modular_xlm_roberta.py
|
{
"start": 9393,
"end": 12977
}
|
class ____(RobertaForSequenceClassification):
    def __init__(self, config):
        super().__init__(config)
        # The modular converter names the backbone after this class
        # ("xlm_roberta"); drop it and re-create the backbone under the
        # attribute name ("roberta") that the inherited code expects.
        del self.xlm_roberta
        self.roberta = XLMRobertaModel(config, add_pooling_layer=False)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(logits.device)
            # Infer (and cache) the problem type on first use: one label ->
            # regression; integer labels -> single-label classification;
            # otherwise multi-label classification.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring
|
XLMRobertaForSequenceClassification
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/envs/tabular/cliffwalking.py
|
{
"start": 12591,
"end": 13648
}
|
class ____(FunctionalJaxEnv, EzPickle):
    """A Gymnasium Env wrapper for the functional cliffwalking env."""

    metadata = {"render_modes": ["rgb_array"], "render_fps": 50, "jax": True}

    def __init__(self, render_mode: str | None = None, **kwargs):
        """Initializes Gym wrapper for cliffwalking functional env."""
        EzPickle.__init__(self, render_mode=render_mode, **kwargs)
        env = CliffWalkingFunctional(**kwargs)
        # JIT-compile the functional env before handing it to the wrapper.
        env.transform(jax.jit)

        super().__init__(
            env,
            metadata=self.metadata,
            render_mode=render_mode,
        )


if __name__ == "__main__":
    """
    Temporary environment tester function.
    """
    # Interactive smoke test: step the env from keyboard input until terminal.
    env = HumanRendering(CliffWalkingJaxEnv(render_mode="rgb_array"))

    obs, info = env.reset()
    print(obs, info)

    terminal = False
    while not terminal:
        action = int(input("Please input an action\n"))
        obs, reward, terminal, truncated, info = env.step(action)
        print(obs, reward, terminal, truncated, info)
    exit()
|
CliffWalkingJaxEnv
|
python
|
PyCQA__pylint
|
tests/functional/t/too/too_few_public_methods.py
|
{
"start": 592,
"end": 852
}
|
class ____:
"""A class can define only special methods."""
def __init__(self, iterable):
self._list = list(iterable)
def __len__(self):
return len(self._list)
def __getitem__(self, index):
return self._list[index]
|
DumbList
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_projects.py
|
{
"start": 2286,
"end": 4965
}
|
class ____:
    """Tests for the admin project-detail view."""

    def test_gets_project(self, db_request):
        # Seed a project with journal entries, maintainer roles and OIDC
        # publishers, then check the view returns them (journal capped at 30).
        project = ProjectFactory.create()
        journals = sorted(
            JournalEntryFactory.create_batch(75, name=project.name),
            key=lambda x: (x.submitted_date, x.id),
            reverse=True,
        )
        roles = sorted(
            RoleFactory.create_batch(5, project=project),
            key=lambda x: (x.role_name, x.user.username),
        )
        oidc_publishers = GitHubPublisherFactory.create_batch(5, projects=[project])
        db_request.matchdict["project_name"] = str(project.normalized_name)
        result = views.project_detail(project, db_request)

        assert result == {
            "project": project,
            "releases": [],
            "maintainers": roles,
            "journal": journals[:30],
            "oidc_publishers": oidc_publishers,
            "ONE_MIB": views.ONE_MIB,
            "MAX_FILESIZE": warehouse.constants.MAX_FILESIZE,
            "MAX_PROJECT_SIZE": warehouse.constants.MAX_PROJECT_SIZE,
            "ONE_GIB": views.ONE_GIB,
            "UPLOAD_LIMIT_CAP": views.UPLOAD_LIMIT_CAP,
            "observation_kinds": ObservationKind,
            "observations": [],
        }

    def test_non_normalized_name(self, db_request):
        # Looking a project up by a non-normalized name must redirect
        # permanently to the canonical URL.
        project = ProjectFactory.create(name="NotNormalized")
        db_request.matchdict["project_name"] = str(project.name)
        db_request.current_route_path = pretend.call_recorder(
            lambda *a, **kw: "/admin/projects/the-redirect/"
        )
        with pytest.raises(HTTPMovedPermanently):
            views.project_detail(project, db_request)

    def test_with_organization(self, db_request):
        from ....common.db.organizations import (
            OrganizationFactory,
            OrganizationProjectFactory,
        )

        # Organization-level limits must remain reachable via the project even
        # when the project carries its own (smaller) limits.
        organization = OrganizationFactory.create(
            upload_limit=150 * views.ONE_MIB,
            total_size_limit=100 * views.ONE_GIB,
        )
        org_project = OrganizationProjectFactory.create(organization=organization)
        project = org_project.project
        project.upload_limit = 50 * views.ONE_MIB
        project.total_size_limit = 50 * views.ONE_GIB
        db_request.matchdict["project_name"] = str(project.normalized_name)
        result = views.project_detail(project, db_request)
        assert result["project"] == project
        assert project.organization == organization
        # Verify that the organization limits are accessible through the project
        assert project.organization.upload_limit == 150 * views.ONE_MIB
        assert project.organization.total_size_limit == 100 * views.ONE_GIB
|
TestProjectDetail
|
python
|
pytorch__pytorch
|
test/dynamo/test_subclasses.py
|
{
"start": 47872,
"end": 48620
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 4]"):
l_x_ = L_x_
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, l_x_); wrap_body_0 = l_x_ = None
getitem: "f32[3, 4]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_0(torch.nn.Module):
def forward(self, l_x_: "f32[3, 4]"):
add_: "f32[3, 4]" = l_x_.add_(1.0); l_x_ = None
return (add_,)
""",
)
ff = torch.func.functionalize(f)
ff_out = ff(t_clone) # noqa: F841
# frame count and op count are incremented due to re-compilation
check_count_and_graph(
2,
4,
2,
"""\
|
GraphModule
|
python
|
huggingface__transformers
|
src/transformers/models/encodec/modeling_encodec.py
|
{
"start": 2586,
"end": 2867
}
|
class ____(ModelOutput):
    r"""
    audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
        Decoded audio values, obtained using the decoder part of Encodec.
    """

    # Decoded waveform; None until the decoder has produced output.
    audio_values: Optional[torch.FloatTensor] = None
|
EncodecDecoderOutput
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/testing/jpl_units/UnitDbl.py
|
{
"start": 87,
"end": 5882
}
|
class ____:
    """Class UnitDbl in development."""

    # Unit conversion table. Small subset of the full one but enough
    # to test the required functions. First field is a scale factor to
    # convert the input units to the units of the second field. Only
    # units in this table are allowed.
    allowed = {
        "m": (0.001, "km"),
        "km": (1, "km"),
        "mile": (1.609344, "km"),
        "rad": (1, "rad"),
        "deg": (1.745329251994330e-02, "rad"),
        "sec": (1, "sec"),
        "min": (60.0, "sec"),
        "hour": (3600, "sec"),
    }

    # Maps each internal (canonical) unit to its physical dimension.
    _types = {
        "km": "distance",
        "rad": "angle",
        "sec": "time",
    }

    def __init__(self, value, units):
        """
        Create a new UnitDbl object.

        Units are internally converted to km, rad, and sec.  The only
        valid inputs for units are [m, km, mile, rad, deg, sec, min, hour].

        The field UnitDbl.value will contain the converted value.  Use
        the convert() method to get a specific type of units back.

        = ERROR CONDITIONS
        - If the input units are not in the allowed list, an error is thrown.

        = INPUT VARIABLES
        - value     The numeric value of the UnitDbl.
        - units     The string name of the units the value is in.
        """
        data = _api.check_getitem(self.allowed, units=units)
        # Store in canonical units: scale by the table factor.
        self._value = float(value * data[0])
        self._units = data[1]

    def convert(self, units):
        """
        Convert the UnitDbl to a specific set of units.

        = ERROR CONDITIONS
        - If the input units are not in the allowed list, an error is thrown.

        = INPUT VARIABLES
        - units     The string name of the units to convert to.

        = RETURN VALUE
        - Returns the value of the UnitDbl in the requested units as a floating
          point number.
        """
        if self._units == units:
            return self._value
        data = _api.check_getitem(self.allowed, units=units)
        # Conversion is only valid within the same dimension (same canonical
        # unit); e.g. km -> sec is rejected.
        if self._units != data[1]:
            raise ValueError(f"Error trying to convert to different units.\n"
                             f"    Invalid conversion requested.\n"
                             f"    UnitDbl: {self}\n"
                             f"    Units:   {units}\n")
        return self._value / data[0]

    def __abs__(self):
        """Return the absolute value of this UnitDbl."""
        return UnitDbl(abs(self._value), self._units)

    def __neg__(self):
        """Return the negative value of this UnitDbl."""
        return UnitDbl(-self._value, self._units)

    def __bool__(self):
        """Return the truth value of a UnitDbl."""
        return bool(self._value)

    def _cmp(self, op, rhs):
        """Check that *self* and *rhs* share units; compare them using *op*."""
        self.checkSameUnits(rhs, "compare")
        return op(self._value, rhs._value)

    # All six rich comparisons delegate to _cmp with the matching operator.
    __eq__ = functools.partialmethod(_cmp, operator.eq)
    __ne__ = functools.partialmethod(_cmp, operator.ne)
    __lt__ = functools.partialmethod(_cmp, operator.lt)
    __le__ = functools.partialmethod(_cmp, operator.le)
    __gt__ = functools.partialmethod(_cmp, operator.gt)
    __ge__ = functools.partialmethod(_cmp, operator.ge)

    def _binop_unit_unit(self, op, rhs):
        """Check that *self* and *rhs* share units; combine them using *op*."""
        self.checkSameUnits(rhs, op.__name__)
        return UnitDbl(op(self._value, rhs._value), self._units)

    # Unit +/- unit requires matching units and yields a unit.
    __add__ = functools.partialmethod(_binop_unit_unit, operator.add)
    __sub__ = functools.partialmethod(_binop_unit_unit, operator.sub)

    def _binop_unit_scalar(self, op, scalar):
        """Combine *self* and *scalar* using *op*."""
        return UnitDbl(op(self._value, scalar), self._units)

    # Unit * scalar (either order) scales the value, keeping the units.
    __mul__ = functools.partialmethod(_binop_unit_scalar, operator.mul)
    __rmul__ = functools.partialmethod(_binop_unit_scalar, operator.mul)

    def __str__(self):
        """Print the UnitDbl."""
        return f"{self._value:g} *{self._units}"

    def __repr__(self):
        """Print the UnitDbl."""
        return f"UnitDbl({self._value:g}, '{self._units}')"

    def type(self):
        """Return the type of UnitDbl data."""
        return self._types[self._units]

    @staticmethod
    def range(start, stop, step=None):
        """
        Generate a range of UnitDbl objects.

        Similar to the Python range() method.  Returns the range [
        start, stop) at the requested step.  Each element will be a
        UnitDbl object.

        = INPUT VARIABLES
        - start     The starting value of the range.
        - stop      The stop value of the range.
        - step      Optional step to use.  If set to None, then a UnitDbl of
                      value 1 w/ the units of the start is used.

        = RETURN VALUE
        - Returns a list containing the requested UnitDbl values.
        """
        if step is None:
            step = UnitDbl(1, start._units)

        elems = []
        i = 0
        while True:
            # start + i*step keeps rounding error from accumulating, unlike
            # repeated addition.
            d = start + i * step
            if d >= stop:
                break

            elems.append(d)
            i += 1

        return elems

    def checkSameUnits(self, rhs, func):
        """
        Check to see if units are the same.

        = ERROR CONDITIONS
        - If the units of the rhs UnitDbl are not the same as our units,
          an error is thrown.

        = INPUT VARIABLES
        - rhs     The UnitDbl to check for the same units
        - func    The name of the function doing the check.
        """
        if self._units != rhs._units:
            raise ValueError(f"Cannot {func} units of different types.\n"
                             f"LHS: {self._units}\n"
                             f"RHS: {rhs._units}")
|
UnitDbl
|
python
|
google__jax
|
jax/_src/pallas/pipelining/schedulers.py
|
{
"start": 11589,
"end": 20563
}
|
class ____(Protocol):
    """Structural type for callables that evaluate one pipeline stage.

    Implementations take the current pipeline context, the stage to run, and
    the stage's flat argument list, and return the next pipeline state.
    """

    def __call__(
        self,
        ctx: PipelineContext,
        stage: internal.PipelineStage,
        args: Sequence[Any],
    ) -> PipelineState:
        ...
def eval_stage(ctx: PipelineContext, stage: internal.PipelineStage, args
               ) -> PipelineState:
    """Evaluates a single stage."""
    flat_ctx = jax.tree.leaves(ctx)
    state_tree = jax.tree.structure(ctx.pipeline_state)
    # Invoke the stage's jaxpr on the flattened context followed by the args.
    next_state = jax_core.eval_jaxpr(
        stage.jaxpr.jaxpr, stage.jaxpr.consts, *flat_ctx, *args
    )
    if next_state:
        return jax.tree.unflatten(state_tree, next_state)
    # A stage with no outputs leaves the pipeline state unchanged.
    return ctx.pipeline_state
def linearize_stages(stages: Sequence[internal.PipelineStage]
                     ) -> Sequence[internal.PipelineStage]:
    """Computes a linearization of the pipeline stages.

    Repeatedly sweeps the remaining stages in order, emitting each stage whose
    read indices have all been produced so far; raises ValueError if no
    progress can be made (i.e. the dependency graph has a cycle or a missing
    producer).
    """
    ordered = []
    written: set = set()
    pending = list(stages)
    while pending:
        progressed = False
        next_pending = list(pending)
        for candidate in pending:
            reads_ready = all(
                idx in written for idx in candidate.get_read_idxs()
            )
            if reads_ready:
                ordered.append(candidate)
                written.update(candidate.get_write_idxs())
                next_pending.remove(candidate)
                progressed = True
        if not progressed:
            raise ValueError(
                "Failed to linearize pipeline stages. Could not linearize"
                f" available_stages={pending!r}")
        pending = next_pending
    return ordered
def make_ctx(stage: internal.PipelineStage,
             stage_idx: int,
             scoreboard: Scoreboard,
             pipeline_state: PipelineState,
             grid_carry: GridCarry | None = None,
             grid: Sequence[int] | None = None,
             offset: int | jax.Array = 0) -> PipelineContext:
    """Builds the PipelineContext for one invocation of a stage.

    The linearized step is the stage's scoreboard counter plus `offset`
    (the steady-state loop passes the loop iteration as the offset, since
    its static counters do not advance). Grid indices come from
    `grid_carry` when provided (dynamic loop) and are otherwise recomputed
    from the step and `grid` (static prologue/epilogue).
    """
    del stage  # unused; kept so all call sites share one signature
    step = scoreboard.stage_counters[stage_idx] + offset
    if grid_carry is not None:
        loop_index = grid_carry.get_indices_for_stage(stage_idx)
    else:
        loop_index = compute_grid_indices(step, grid)
    return PipelineContext(loop_index=loop_index,
                           linearized_index=step,
                           pipeline_state=pipeline_state)
# TODO(justinfu): Implement a second version that rolls more of the pipeline
# into the loop body to reduce code size.
def static_nd_loop_scheduler(
nd_loop: internal.NDLoopStruct,
args: Sequence[Any],
initial_state: PipelineState | None = None,
eval_fn: EvalStageFunc | None = None,
):
"""Schedules and emits the pipeline into a single instruction stream.
This scheduler is static in the sense that most of the control logic is
implemented in Python and run at JAX tracing time. This reduce scalar
core pressure as the scoreboarding logic does not have to be computed
at runtime.
"""
if eval_fn is None:
eval_fn = eval_stage
stages = linearize_stages(nd_loop.stages)
num_stages = len(stages)
num_itrs = np.prod(nd_loop.grid)
check_pipeline(stages)
scoreboard = Scoreboard.create(stages)
def can_run_stage(
stage: internal.PipelineStage,
scoreboard: Scoreboard,
new_scoreboard: Scoreboard,
current_stage_counter: int | jax.Array,
) -> bool | jax.Array:
can_run = True
# Check args ready.
can_run = can_run & check_args_ready(
stage, scoreboard, new_scoreboard, current_stage_counter)
# Check dependents
if stage.properties.is_async_start:
can_run = can_run & check_async_start(
stage, scoreboard, current_stage_counter,
)
if stage.properties.is_async_done:
can_run = can_run & check_async_done(
stage, scoreboard, num_itrs, current_stage_counter)
return can_run
def compute_offsets(scoreboard: Scoreboard) -> Sequence[int] | None:
while any(scoreboard.stage_counters[i] < 1 for i in range(num_stages)):
new_scoreboard = scoreboard.copy()
for stage_idx, stage in enumerate(stages):
current_stage_counter = scoreboard.stage_counters[stage_idx]
can_run = can_run_stage(
stage, scoreboard, new_scoreboard, current_stage_counter
)
if can_run:
new_scoreboard.increment_stage_counter(stage_idx)
if scoreboard.stage_counters == new_scoreboard.stage_counters:
raise ValueError("Scheduling error. No stages ran.")
scoreboard = new_scoreboard
min_stage = min(scoreboard.stage_counters)
offsets = [
scoreboard.stage_counters[i] - min_stage for i in range(num_stages)
]
if max(offsets) > num_itrs:
# Bail out, since we won't be running the main loop.
return None
return offsets
# Main loop stage iteration offsets.
# This is a list of integers containing the number of iterations each
# stage is ahead of the slowest stage.
offsets = compute_offsets(scoreboard)
# Static prologue
# This runs the pipeline up until the steady state.
pipeline_state = initial_state
with jax.named_scope("pipeline_prologue"):
while any(
scoreboard.stage_counters[i] < (offsets[i] if offsets else 1)
for i in range(num_stages)
):
new_scoreboard = scoreboard.copy()
for stage_idx, stage in enumerate(stages):
current_stage_counter = scoreboard.stage_counters[stage_idx]
if offsets:
can_run = current_stage_counter < offsets[stage_idx]
else:
can_run = current_stage_counter < num_itrs
can_run = can_run & can_run_stage(
stage, scoreboard, new_scoreboard, current_stage_counter
)
if can_run:
pipeline_state = eval_fn(
make_ctx(
stage, stage_idx, scoreboard, pipeline_state,
grid=nd_loop.grid,
),
stage,
args,
)
new_scoreboard.increment_stage_counter(stage_idx)
if scoreboard.stage_counters == new_scoreboard.stage_counters:
raise ValueError("Scheduling error. No stages ran.")
scoreboard = new_scoreboard
if offsets:
assert all(
scoreboard.stage_counters[i] == offsets[i] for i in range(num_stages)
), (
f"Scheduling error. Scoreboard {scoreboard.stage_counters} does not"
f" match computed offsets {offsets}"
)
# Dynamic loop body.
# This runs the steady state of the pipeline where all stages run with
# no control flow.
@jax.named_scope("pipeline_steady_state")
def loop_body(itr: jax.Array, carry: tuple[PipelineState, GridCarry]):
pipeline_state, grid_carry = carry
stages_left = list(stages)
old_scoreboard = scoreboard.copy()
while any(stages_left):
new_scoreboard = old_scoreboard.copy()
for stage_idx, stage in enumerate(stages_left):
if stage is None:
continue
current_stage_counter = old_scoreboard.stage_counters[stage_idx]
can_run = can_run_stage(
stage, old_scoreboard, new_scoreboard, current_stage_counter
)
if can_run:
pipeline_state = eval_fn(
make_ctx(
stage,
stage_idx,
old_scoreboard,
pipeline_state,
grid_carry=grid_carry,
offset=itr,
),
stage,
args,
)
new_scoreboard.increment_stage_counter(stage_idx)
stages_left[stage_idx] = None
old_scoreboard = new_scoreboard
return (pipeline_state, grid_carry.next())
num_loop_itrs = int(max(num_itrs - max(scoreboard.stage_counters), 0))
if offsets:
grid_carry = GridCarry.init(
offsets=offsets, grid=nd_loop.grid, dynamic=True)
init_carry = (pipeline_state, grid_carry)
final_carry = jax.lax.fori_loop(0, num_loop_itrs, loop_body, init_carry)
(pipeline_state, _) = final_carry
# Update the static scoreboard to reflect the fact that each stage ran
# num_loop_itrs times.
for stage_idx in range(len(stages)):
scoreboard.stage_counters[stage_idx] += num_loop_itrs
# Static epilogue
with jax.named_scope("pipeline_epilogue"):
while any(
scoreboard.stage_counters[i] < num_itrs for i in range(num_stages)
):
new_scoreboard = scoreboard.copy()
for stage_idx, stage in enumerate(stages):
current_stage_counter = scoreboard.stage_counters[stage_idx]
can_run = current_stage_counter < num_itrs
can_run = can_run & can_run_stage(
stage, scoreboard, new_scoreboard, current_stage_counter
)
if can_run:
pipeline_state = eval_fn(
make_ctx(
stage, stage_idx, scoreboard, pipeline_state,
grid=nd_loop.grid,
),
stage,
args,
)
new_scoreboard.increment_stage_counter(stage_idx)
if scoreboard.stage_counters == new_scoreboard.stage_counters:
raise ValueError("Scheduling error. No stages ran.")
scoreboard = new_scoreboard
|
EvalStageFunc
|
python
|
milvus-io__pymilvus
|
pymilvus/bulk_writer/bulk_writer.py
|
{
"start": 1086,
"end": 14325
}
|
class ____:
    """Buffers and validates rows destined for Milvus bulk-import files.

    Rows appended through :meth:`append_row` are checked against the collection
    schema (types, lengths, vector dims, nullability/defaults), normalized in
    place, and accumulated in an internal :class:`Buffer`.  Counter updates and
    buffer swaps are guarded by an internal lock; subclasses decide when/where
    the buffer is persisted (see :meth:`commit` / :meth:`data_path`).
    """
    def __init__(
        self,
        schema: CollectionSchema,
        chunk_size: int,
        file_type: BulkFileType,
        config: Optional[dict] = None,
        **kwargs,
    ):
        self._schema = schema
        self._buffer_size = 0  # estimated byte size of currently buffered rows
        self._buffer_row_count = 0  # rows buffered since the last commit
        self._total_row_count = 0  # rows appended over the writer's lifetime
        self._file_type = file_type
        self._buffer_lock = Lock()
        self._config = config
        # the old parameter segment_size is changed to chunk_size, compatible with the legacy code
        self._chunk_size = chunk_size
        segment_size = kwargs.get("segment_size", 0)
        if segment_size > 0:
            self._chunk_size = segment_size
        if len(self._schema.fields) == 0:
            self._throw("collection schema fields list is empty")
        if self._schema.primary_field is None:
            self._throw("primary field is null")
        self._buffer = None
        self._new_buffer()
    @property
    def buffer_size(self):
        # Estimated byte size of the rows currently buffered.
        return self._buffer_size
    @property
    def buffer_row_count(self):
        # Number of rows buffered since the last commit.
        return self._buffer_row_count
    @property
    def total_row_count(self):
        # Number of rows appended since this writer was created.
        return self._total_row_count
    @property
    def chunk_size(self):
        # Target chunk size for generated files (legacy name: segment_size).
        return self._chunk_size
    def _new_buffer(self):
        """Swap in a fresh Buffer and return the previous one (may be None)."""
        old_buffer = self._buffer
        with self._buffer_lock:
            self._buffer = Buffer(self._schema, self._file_type, self._config)
        return old_buffer
    def append_row(self, row: dict, **kwargs):
        """Validate and normalize ``row`` in place, then buffer it.

        Note: ``_verify_row`` acquires the lock itself, so the two locked
        sections run sequentially, never nested (important if ``Lock`` is a
        non-reentrant ``threading.Lock`` — presumably it is; TODO confirm).
        """
        self._verify_row(row)
        with self._buffer_lock:
            self._buffer.append_row(row)
    def commit(self, **kwargs):
        """Reset the per-buffer counters; subclasses extend this to persist."""
        with self._buffer_lock:
            self._buffer_size = 0
            self._buffer_row_count = 0
    @property
    def data_path(self):
        # Output location of generated files; the base writer has none.
        return ""
    def _try_convert_json(self, field_name: str, obj: object):
        """Parse ``obj`` as JSON when it is a string; pass through otherwise."""
        if isinstance(obj, str):
            try:
                return json.loads(obj)
            except Exception as e:
                self._throw(
                    f"Illegal JSON value for field '{field_name}', type mismatch or illegal format, error: {e}"
                )
        return obj
    def _throw(self, msg: str):
        """Log ``msg`` and raise it as a MilvusException (never returns)."""
        logger.error(msg)
        raise MilvusException(message=msg)
    def _verify_vector(self, x: object, field: FieldSchema):
        """Validate a vector value; return ``(normalized_value, byte_size)``."""
        dtype = DataType(field.dtype)
        validator = TYPE_VALIDATOR[dtype.name]
        if dtype != DataType.SPARSE_FLOAT_VECTOR:
            dim = field.params["dim"]
            try:
                origin_list = validator(x, dim)
                if dtype == DataType.FLOAT_VECTOR:
                    return origin_list, dim * 4  # for float vector, each dim occupies 4 bytes
                if dtype in [DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR]:
                    return (
                        origin_list,
                        dim * 2,
                    )  # for float16 or bfloat16 vector, each dim occupies 2 bytes
                if dtype == DataType.INT8_VECTOR:
                    return origin_list, dim  # for int8 vector, each dim occupies 1 bytes
                if dtype == DataType.BINARY_VECTOR:
                    return origin_list, dim / 8  # for binary vector, 8 dim occupies 1 byte
                self._throw(f"Illegal vector data type for vector field: '{field.name}'")
            except MilvusException as e:
                self._throw(f"Illegal vector data for vector field: '{field.name}': {e.message}")
        else:
            try:
                validator(x)
                return x, len(x) * 12  # for sparse vector, each key-value is int-float, 12 bytes
            except MilvusException as e:
                self._throw(f"Illegal vector data for vector field: '{field.name}': {e.message}")
    def _verify_json(self, x: object, field: FieldSchema):
        """Validate a JSON value; return ``(parsed_value, byte_size)``."""
        size = 0
        validator = TYPE_VALIDATOR[DataType.JSON.name]
        if isinstance(x, str):
            # Strings are parsed so downstream writers store structured JSON.
            size = len(x)
            x = self._try_convert_json(field.name, x)
        elif validator(x):
            size = len(json.dumps(x))
        else:
            self._throw(f"Illegal JSON value for field '{field.name}', type mismatch")
        return x, size
    def _verify_varchar(self, x: object, field: FieldSchema):
        """Validate a varchar value against max_length; return its length."""
        max_len = field.params["max_length"]
        validator = TYPE_VALIDATOR[DataType.VARCHAR.name]
        if not validator(x, max_len):
            # NOTE(review): the message hard-codes "54,254" but the actual
            # check uses the field's max_length — consider reporting max_len.
            self._throw(
                f"Illegal varchar value for field '{field.name}',"
                f" length exceeds 54,254 or type mismatch"
            )
        return len(x)
    def _verify_scalar(self, x: object, dtype: DataType, field_name: str):
        """Validate a scalar value; return its estimated byte size."""
        validator = TYPE_VALIDATOR[dtype.name]
        if not validator(x):
            self._throw(
                f"Illegal scalar value for field '{field_name}', value overflow or type mismatch"
            )
        if isinstance(x, str):
            # String scalars are sized by character count.
            return len(x)
        return TYPE_SIZE[dtype.name]
    def _verify_array(self, x: object, field: FieldSchema):
        """Validate an array value element-by-element; return its byte size."""
        max_capacity = field.params["max_capacity"]
        element_type = field.element_type
        validator = TYPE_VALIDATOR[DataType.ARRAY.name]
        if not validator(x, max_capacity):
            self._throw(
                f"Illegal array value for field '{field.name}', length exceeds capacity or type mismatch"
            )
        row_size = 0
        if element_type.name in TYPE_SIZE:
            # Fixed-size elements: size is per-element size times count.
            row_size = TYPE_SIZE[element_type.name] * len(x)
            for ele in x:
                self._verify_scalar(ele, element_type, field.name)
        elif element_type == DataType.VARCHAR:
            for ele in x:
                row_size = row_size + self._verify_varchar(ele, field)
        else:
            self._throw(f"Unsupported element type for array field '{field.name}'")
        return row_size
    def _verify_normal_field(self, row: dict):
        """Validate/normalize all non-struct fields of ``row`` in place.

        Applies nullable/default-value rules, converts numpy values to plain
        Python, and returns the estimated byte size contributed by the row.
        """
        row_size = 0
        for field in self._schema.fields:
            if field.is_primary and field.auto_id:
                if field.name in row:
                    self._throw(
                        f"The primary key field '{field.name}' is auto-id, no need to provide"
                    )
                else:
                    continue
            if field.is_function_output:
                if field.name in row:
                    self._throw(f"Field '{field.name}' is function output, no need to provide")
                else:
                    continue
            dtype = DataType(field.dtype)
            # deal with null (None) according to the Applicable rules in this page:
            # https://milvus.io/docs/nullable-and-default.md#Nullable--Default
            if field.nullable:
                if (
                    field.default_value is not None
                    and field.default_value.WhichOneof("data") is not None
                ):
                    # 1: nullable is true, default_value is not null, user_input is null
                    # replace the value by default value
                    if (field.name not in row) or (row[field.name] is None):
                        data_type = field.default_value.WhichOneof("data")
                        row[field.name] = getattr(field.default_value, data_type)
                        continue
                    # 2: nullable is true, default_value is not null, user_input is not null
                    # check and set the value
                # 3: nullable is true, default_value is null, user_input is null
                # do nothing
                elif (field.name not in row) or (row[field.name] is None):
                    row[field.name] = None
                    continue
                # 4: nullable is true, default_value is null, user_input is not null
                # check and set the value
            elif (
                field.default_value is not None
                and field.default_value.WhichOneof("data") is not None
            ):
                # 5: nullable is false, default_value is not null, user_input is null
                # replace the value by default value
                if (field.name not in row) or (row[field.name] is None):
                    data_type = field.default_value.WhichOneof("data")
                    row[field.name] = getattr(field.default_value, data_type)
                    continue
                # 6: nullable is false, default_value is not null, user_input is not null
                # check and set the value
                # 7: nullable is false, default_value is null, user_input is null
                # raise an exception
            elif (field.name not in row) or (row[field.name] is None):
                self._throw(f"The field '{field.name}' is not nullable, not allow None value")
            # 8: nullable is false, default_value is null, user_input is not null
            # check and set the value
            # check and set value, calculate size of this row
            if dtype in {
                DataType.BINARY_VECTOR,
                DataType.FLOAT_VECTOR,
                DataType.FLOAT16_VECTOR,
                DataType.BFLOAT16_VECTOR,
                DataType.SPARSE_FLOAT_VECTOR,
                DataType.INT8_VECTOR,
            }:
                origin_list, byte_len = self._verify_vector(row[field.name], field)
                row[field.name] = origin_list
                row_size = row_size + byte_len
            elif dtype == DataType.VARCHAR:
                row_size = row_size + self._verify_varchar(row[field.name], field)
            elif dtype == DataType.JSON:
                row[field.name], size = self._verify_json(row[field.name], field)
                row_size = row_size + size
            elif dtype == DataType.ARRAY:
                if isinstance(row[field.name], np.ndarray):
                    # Normalize numpy arrays to plain lists before validation.
                    row[field.name] = row[field.name].tolist()
                row_size = row_size + self._verify_array(row[field.name], field)
            else:
                if isinstance(row[field.name], np.generic):
                    # Unwrap numpy scalar types to native Python values.
                    row[field.name] = row[field.name].item()
                row_size = row_size + self._verify_scalar(row[field.name], dtype, field.name)
        return row_size
    def _verify_struct(self, x: object, field: StructFieldSchema):
        """Validate one struct-field value (a list of dicts); return byte size.

        Sub-field values are normalized in place to numpy-typed values.
        """
        validator = TYPE_VALIDATOR[DataType.STRUCT.name]
        if not validator(x, field.max_capacity):
            self._throw(
                f"Illegal value for struct field '{field.name}', length exceeds capacity or type mismatch"
            )
        struct_size = 0
        for sub_field in field.fields:
            sub_dtype = DataType(sub_field.dtype)
            for obj in x:
                if sub_field.name not in obj:
                    self._throw(
                        f"Sub field '{sub_field.name}' of struct field '{field.name}' is missed"
                    )
                if sub_dtype == DataType.FLOAT_VECTOR:
                    origin_list, byte_len = self._verify_vector(obj[sub_field.name], sub_field)
                    obj[sub_field.name] = np.array(
                        origin_list, dtype=NUMPY_TYPE_CREATOR[DataType.FLOAT.name]
                    )
                    struct_size = struct_size + byte_len
                elif sub_dtype == DataType.VARCHAR:
                    struct_size = struct_size + self._verify_varchar(obj[sub_field.name], sub_field)
                elif sub_dtype in {
                    DataType.BOOL,
                    DataType.INT8,
                    DataType.INT16,
                    DataType.INT32,
                    DataType.INT64,
                    DataType.FLOAT,
                    DataType.DOUBLE,
                }:
                    if isinstance(obj[sub_field.name], np.generic):
                        obj[sub_field.name] = obj[sub_field.name].item()
                    struct_size = struct_size + self._verify_scalar(
                        obj[sub_field.name], sub_dtype, sub_field.name
                    )
                    # Re-wrap as the numpy scalar type expected by the writer.
                    obj[sub_field.name] = NUMPY_TYPE_CREATOR[sub_dtype.name].type(
                        obj[sub_field.name]
                    )
                else:
                    self._throw(f"Unsupported field type '{sub_dtype.name}' for struct field")
        return struct_size
    def _verify_struct_field(self, row: dict):
        """Validate every struct field of ``row``; return their byte size."""
        structs_size = 0
        for field in self._schema.struct_fields:
            if field.name not in row:
                self._throw(f"The struct field '{field.name}' is missed")
            structs_size = structs_size + self._verify_struct(row[field.name], field)
        return structs_size
    def _verify_row(self, row: dict):
        """Validate a whole row and update the size/row counters atomically."""
        if not isinstance(row, dict):
            self._throw("The input row must be a dict object")
        normal_fields_size = self._verify_normal_field(row)
        struct_fields_size = self._verify_struct_field(row)
        with self._buffer_lock:
            self._buffer_size = self._buffer_size + normal_fields_size + struct_fields_size
            self._buffer_row_count = self._buffer_row_count + 1
            self._total_row_count = self._total_row_count + 1
|
BulkWriter
|
python
|
allegroai__clearml
|
clearml/backend_interface/metrics/events.py
|
{
"start": 18730,
"end": 19633
}
|
class MediaEvent(UploadEvent):
    """Upload event whose payload is an in-memory media stream."""

    def __init__(
        self,
        metric: str,
        variant: str,
        stream: Union[io.StringIO, io.BytesIO],
        local_image_path: Optional[str] = None,
        iter: int = 0,
        upload_uri: Optional[str] = None,
        file_history_size: Optional[int] = None,
        delete_after_upload: bool = False,
        **kwargs: Any
    ) -> None:
        # The stream is handed to the base class through its ``image_data``
        # slot; all remaining options are forwarded untouched.
        super().__init__(
            metric,
            variant,
            image_data=stream,
            local_image_path=local_image_path,
            iter=iter,
            upload_uri=upload_uri,
            file_history_size=file_history_size,
            delete_after_upload=delete_after_upload,
            **kwargs
        )

    def get_api_event(self) -> "events.MetricsImageEvent":
        """Build the backend API event describing this upload."""
        base_fields = self._get_base_dict()
        return events.MetricsImageEvent(url=self._url, key=self._key, **base_fields)
|
MediaEvent
|
python
|
pypa__setuptools
|
setuptools/warnings.py
|
{
"start": 3420,
"end": 3796
}
|
class SetuptoolsDeprecationWarning(SetuptoolsWarning):
    """
    Base class for warning deprecations in ``setuptools``

    This class is not derived from ``DeprecationWarning``, and as such is
    visible by default.
    """

    @staticmethod
    def _should_enforce():
        """Return True when deprecations should be enforced (raised).

        Fix: the original had no ``self`` parameter and no decorator, so it
        could only be called through the class object; calling it on an
        instance raised TypeError. ``@staticmethod`` keeps the class-level
        call working and additionally allows instance-level calls.
        """
        # Opt-in escalation via environment variable; any of these truthy
        # spellings turns warnings into enforced errors.
        enforce = os.getenv("SETUPTOOLS_ENFORCE_DEPRECATION", "false").lower()
        return enforce in ("true", "on", "ok", "1")
|
SetuptoolsDeprecationWarning
|
python
|
pypa__warehouse
|
tests/unit/test_forms.py
|
{
"start": 1074,
"end": 2495
}
|
class TestPasswordStrengthValidator:
    """Exercise the configuration and scoring paths of PasswordStrengthValidator."""

    def test_invalid_fields(self):
        # Unknown user-input field names must be rejected up front.
        validator = PasswordStrengthValidator(user_input_fields=["foo"])
        with pytest.raises(ValidationError) as excinfo:
            validator({}, pretend.stub())
        assert str(excinfo.value) == "Invalid field name: 'foo'"

    @pytest.mark.parametrize("password", ["this is a great password!"])
    def test_good_passwords(self, password):
        # A sufficiently strong password validates without raising.
        PasswordStrengthValidator()(pretend.stub(), pretend.stub(data=password))

    @pytest.mark.parametrize(
        ("password", "expected"),
        [
            (
                "qwerty",
                (
                    "This is a top-10 common password. Add another word or two. "
                    "Uncommon words are better."
                ),
            ),
            (
                "bombo!b",
                (
                    "Password is too easily guessed. Add another word or two. "
                    "Uncommon words are better."
                ),
            ),
            ("bombo!b asdadad", "Password is too easily guessed."),
        ],
    )
    def test_invalid_password(self, password, expected):
        # Weak passwords fail with the validator's feedback message.
        validator = PasswordStrengthValidator(required_strength=5)
        with pytest.raises(ValidationError) as excinfo:
            validator(pretend.stub(), pretend.stub(data=password))
        assert str(excinfo.value) == expected
|
TestPasswordStrengthValidator
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/document_summary/retrievers.py
|
{
"start": 4685,
"end": 7206
}
|
class DocumentSummaryIndexEmbeddingRetriever(BaseRetriever):
    """
    Document Summary Index Embedding Retriever.

    Finds the summaries whose embeddings best match the query, then returns
    all nodes belonging to those summaries.

    Args:
        index (DocumentSummaryIndex): The index to retrieve from.
        similarity_top_k (int): The number of summary nodes to retrieve.
    """

    def __init__(
        self,
        index: DocumentSummaryIndex,
        similarity_top_k: int = 1,
        embed_model: Optional[BaseEmbedding] = None,
        callback_manager: Optional[CallbackManager] = None,
        object_map: Optional[dict] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> None:
        """Init params."""
        self._index = index
        self._vector_store = index.vector_store
        self._embed_model = embed_model or Settings.embed_model
        self._docstore = index.docstore
        self._index_struct = index.index_struct
        self._similarity_top_k = similarity_top_k
        super().__init__(
            callback_manager=callback_manager or Settings.callback_manager,
            object_map=object_map,
            verbose=verbose,
        )

    def _retrieve(
        self,
        query_bundle: QueryBundle,
    ) -> List[NodeWithScore]:
        """Retrieve the nodes of the top-k matching summaries."""
        # Compute the query embedding lazily, only when the vector store
        # expects one and the caller did not supply it.
        if self._vector_store.is_embedding_query and query_bundle.embedding is None:
            query_bundle.embedding = self._embed_model.get_agg_embedding_from_queries(
                query_bundle.embedding_strs
            )

        query_result = self._vector_store.query(
            VectorStoreQuery(
                query_embedding=query_bundle.embedding,
                similarity_top_k=self._similarity_top_k,
            )
        )

        # Prefer explicit ids; fall back to node ids; otherwise the store
        # returned nothing usable.
        if query_result.ids is not None:
            top_k_summary_ids: List[str] = query_result.ids
        elif query_result.nodes is not None:
            top_k_summary_ids = [node.node_id for node in query_result.nodes]
        else:
            raise ValueError(
                "Vector store query result should return at least one of nodes or ids."
            )

        results: List[NodeWithScore] = []
        for summary_id in top_k_summary_ids:
            member_node_ids = self._index_struct.summary_id_to_node_ids[summary_id]
            results.extend(
                NodeWithScore(node=node)
                for node in self._docstore.get_nodes(member_node_ids)
            )
        return results
# Legacy alias kept for backward compatibility with code importing the old
# name; new code should use DocumentSummaryIndexLLMRetriever directly.
DocumentSummaryIndexRetriever = DocumentSummaryIndexLLMRetriever
|
DocumentSummaryIndexEmbeddingRetriever
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-beautiful-substrings-i.py
|
{
"start": 79,
"end": 976
}
|
class Solution(object):
    def beautifulSubstrings(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: int

        Count "beautiful" substrings: equal numbers of vowels and consonants
        (so the length L is even) with (vowels * consonants) = (L/2)^2
        divisible by k.  O(n) time using a prefix balance plus a periodicity
        reduction of the divisibility test.
        """
        VOWELS = set("aeiou")
        # prefix[i] = (#vowels - #consonants) in s[:i]; the counts are equal
        # in s[i:j] exactly when prefix[i] == prefix[j].
        prefix = [0] * (len(s) + 1)
        # NOTE: ``range`` replaces the Py2-only ``xrange`` — iteration
        # behavior is identical, and the solution now also runs on Python 3.
        for i in range(len(s)):
            prefix[i + 1] = prefix[i] + (+1 if s[i] in VOWELS else -1)
        # Reduce "(L/2)^2 % k == 0" (for even L) to "L % new_k == 0":
        # for each prime p^c in k, (L/2) must carry p^ceil(c/2), hence L must
        # carry p^ceil(c/2) with one extra factor of 2 when p == 2.
        new_k = 1
        x = k
        for i in range(2, k + 1):
            if i * i > k:
                break
            cnt = 0
            while x % i == 0:
                x //= i
                cnt += 1
            if cnt:
                new_k *= i ** ((cnt + 1) // 2 + int(i == 2))
        if x != 1:
            # Whatever remains is a single prime factor with exponent 1.
            new_k *= x ** ((1 + 1) // 2 + int(x == 2))
        # Count index pairs (i, j) with equal prefix balance and
        # i ≡ j (mod new_k); each such pair is one beautiful substring.
        cnt = collections.Counter()
        result = 0
        for i, p in enumerate(prefix):
            result += cnt[p, i % new_k]
            cnt[p, i % new_k] += 1
        return result
# Time: O(n^2)
# Space: O(1)
# brute force
|
Solution
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/prompt_values.py
|
{
"start": 3057,
"end": 3532
}
|
class ImagePromptValue(PromptValue):
    """Prompt value wrapping a single image URL."""

    image_url: ImageURL
    """Image URL."""

    type: Literal["ImagePromptValue"] = "ImagePromptValue"

    def to_string(self) -> str:
        """Return the image URL as a string (default ``""`` when absent)."""
        return self.image_url.get("url", "")

    def to_messages(self) -> list[BaseMessage]:
        """Return the image URL wrapped in one human message."""
        payload = cast("dict", self.image_url)
        return [HumanMessage(content=[payload])]
|
ImagePromptValue
|
python
|
mahmoud__boltons
|
boltons/cacheutils.py
|
{
"start": 11340,
"end": 13180
}
|
class ____(LRI):
    """The ``LRU`` is :class:`dict` subtype implementation of the
    *Least-Recently Used* caching strategy.
    Args:
        max_size (int): Max number of items to cache. Defaults to ``128``.
        values (iterable): Initial values for the cache. Defaults to ``None``.
        on_miss (callable): a callable which accepts a single argument, the
            key not present in the cache, and returns the value to be cached.
    >>> cap_cache = LRU(max_size=2)
    >>> cap_cache['a'], cap_cache['b'] = 'A', 'B'
    >>> from pprint import pprint as pp
    >>> pp(dict(cap_cache))
    {'a': 'A', 'b': 'B'}
    >>> [cap_cache['b'] for i in range(3)][0]
    'B'
    >>> cap_cache['c'] = 'C'
    >>> print(cap_cache.get('a'))
    None
    This cache is also instrumented with statistics
    collection. ``hit_count``, ``miss_count``, and ``soft_miss_count``
    are all integer members that can be used to introspect the
    performance of the cache. ("Soft" misses are misses that did not
    raise :exc:`KeyError`, e.g., ``LRU.get()`` or ``on_miss`` was used to
    cache a default.)
    >>> cap_cache.hit_count, cap_cache.miss_count, cap_cache.soft_miss_count
    (3, 1, 1)
    Other than the size-limiting caching behavior and statistics,
    ``LRU`` acts like its parent class, the built-in Python :class:`dict`.
    """
    def __getitem__(self, key):
        # _lock comes from LRI; a miss below re-enters the mapping via
        # ``self[key] = ...`` while the lock is held, so the lock is
        # presumably reentrant (RLock) — TODO confirm against LRI.
        with self._lock:
            try:
                # LRI helper: fetch the linked-list node for ``key`` and move
                # it to the front (marking it most-recently-used).
                link = self._get_link_and_move_to_front_of_ll(key)
            except KeyError:
                self.miss_count += 1
                if not self.on_miss:
                    # No fallback configured: propagate the KeyError.
                    raise
                # Compute via on_miss, cache through __setitem__, and return.
                ret = self[key] = self.on_miss(key)
                return ret
            self.hit_count += 1
            # VALUE is a module-level index into the link structure.
            return link[VALUE]
### Cached decorator
# Key-making technique adapted from Python 3.4's functools
|
LRU
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_auth_backends.py
|
{
"start": 1464,
"end": 3483
}
|
class ____(BaseTest):
def test_authenticate(self):
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
backend = OAuth2Backend()
credentials = {"request": request}
u = backend.authenticate(**credentials)
self.assertEqual(u, self.user)
def test_authenticate_raises_error_with_invalid_hex_in_query_params(self):
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource?auth_token=%%7A", **auth_headers)
credentials = {"request": request}
with pytest.raises(SuspiciousOperation):
OAuth2Backend().authenticate(**credentials)
@patch("oauth2_provider.backends.OAuthLibCore.verify_request")
def test_value_errors_are_reraised(self, patched_verify_request):
patched_verify_request.side_effect = ValueError("Generic error")
with pytest.raises(ValueError):
OAuth2Backend().authenticate(request={})
def test_authenticate_fail(self):
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "badstring",
}
request = self.factory.get("/a-resource", **auth_headers)
backend = OAuth2Backend()
credentials = {"request": request}
self.assertIsNone(backend.authenticate(**credentials))
credentials = {"username": "u", "password": "p"}
self.assertIsNone(backend.authenticate(**credentials))
def test_get_user(self):
backend = OAuth2Backend()
self.assertEqual(self.user, backend.get_user(self.user.pk))
self.assertIsNone(backend.get_user(123456))
@override_settings(
AUTHENTICATION_BACKENDS=(
"oauth2_provider.backends.OAuth2Backend",
"django.contrib.auth.backends.ModelBackend",
),
)
@modify_settings(
MIDDLEWARE={
"append": "oauth2_provider.middleware.OAuth2TokenMiddleware",
}
)
|
TestOAuth2Backend
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/models.py
|
{
"start": 24960,
"end": 28252
}
|
class ____(Response):
    """
    Response of models.archive_many endpoint.
    :param succeeded:
    :type succeeded: Sequence[dict]
    :param failed:
    :type failed: Sequence[dict]
    """

    # Endpoint routing metadata consumed by the base Response machinery.
    _service = "models"
    _action = "archive_many"
    _version = "2.23"
    # JSON schema of the response payload; follows the versioned
    # service-binding pattern — presumably auto-generated from the server API
    # spec, so avoid hand-editing (TODO confirm generation source).
    _schema = {
        "definitions": {},
        "properties": {
            "failed": {
                "items": {
                    "properties": {
                        "error": {
                            "description": "Error info",
                            "properties": {
                                "codes": {
                                    "items": {"type": "integer"},
                                    "type": "array",
                                },
                                "data": {
                                    "additionalProperties": True,
                                    "type": "object",
                                },
                                "msg": {"type": "string"},
                            },
                            "type": "object",
                        },
                        "id": {
                            "description": "ID of the failed entity",
                            "type": "string",
                        },
                    },
                    "type": "object",
                },
                "type": ["array", "null"],
            },
            "succeeded": {
                "items": {
                    "properties": {
                        "archived": {
                            "description": "Indicates whether the model was archived",
                            "type": "boolean",
                        },
                        "id": {
                            "description": "ID of the succeeded entity",
                            "type": "string",
                        },
                    },
                    "type": "object",
                },
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }
    def __init__(
        self, succeeded: Optional[List[dict]] = None, failed: Optional[List[dict]] = None, **kwargs: Any
    ) -> None:
        """Store the per-entity success/failure lists via the setters below."""
        super(ArchiveManyResponse, self).__init__(**kwargs)
        self.succeeded = succeeded
        self.failed = failed
    @schema_property("succeeded")
    def succeeded(self) -> Optional[List[dict]]:
        return self._property_succeeded
    @succeeded.setter
    def succeeded(self, value: Optional[List[dict]]) -> None:
        # Accepts only a list/tuple of dicts; None clears the property.
        if value is None:
            self._property_succeeded = None
            return
        self.assert_isinstance(value, "succeeded", (list, tuple))
        self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
        self._property_succeeded = value
    @schema_property("failed")
    def failed(self) -> Optional[List[dict]]:
        return self._property_failed
    @failed.setter
    def failed(self, value: Optional[List[dict]]) -> None:
        # Accepts only a list/tuple of dicts; None clears the property.
        if value is None:
            self._property_failed = None
            return
        self.assert_isinstance(value, "failed", (list, tuple))
        self.assert_isinstance(value, "failed", (dict,), is_array=True)
        self._property_failed = value
|
ArchiveManyResponse
|
python
|
pytorch__pytorch
|
torch/fx/passes/pass_manager.py
|
{
"start": 4692,
"end": 7085
}
|
class PassManager:
    """
    Construct a PassManager.

    Collects passes and constraints. This defines the pass schedule, manages
    pass constraints and pass execution.

    Args:
        passes (Optional[List[Callable]]): list of passes. A pass is a
            callable which modifies an object and returns modified object
        constraint (Optional[List[Callable]]): list of constraints. A
            constraint is a callable which takes two passes (A, B) and returns
            True if A depends on B and False otherwise. See implementation of
            `this_before_that_pass_constraint` for example.
    """

    passes: list[Callable]
    constraints: list[Callable]
    # Memoizes the result of validate(); reset whenever the schedule changes.
    _validated: bool = False

    def __init__(
        self,
        passes=None,
        constraints=None,
    ):
        self.passes = passes or []
        self.constraints = constraints or []

    @classmethod
    def build_from_passlist(cls, passes):
        """Build a pass manager of type ``cls`` from a list of passes.

        Fix: the original hard-coded ``PassManager(passes)``, so subclasses
        calling this alternate constructor got a base-class instance; using
        ``cls`` keeps subclassing working.
        """
        pm = cls(passes)
        # TODO(alexbeloi): add constraint management/validation
        return pm

    def add_pass(self, _pass: Callable):
        """Append a pass to the schedule and invalidate prior validation."""
        self.passes.append(_pass)
        self._validated = False

    def add_constraint(self, constraint):
        """Register a scheduling constraint and invalidate prior validation."""
        self.constraints.append(constraint)
        self._validated = False

    def remove_pass(self, _passes: list[str]):
        """Remove every pass whose ``__name__`` appears in ``_passes``."""
        if _passes is None:
            return
        self.passes = [ps for ps in self.passes if ps.__name__ not in _passes]
        self._validated = False

    def replace_pass(self, _target, _replacement):
        """Replace each pass named like ``_target`` with ``_replacement``."""
        self.passes = [
            _replacement if ps.__name__ == _target.__name__ else ps
            for ps in self.passes
        ]
        self._validated = False

    def validate(self):
        """
        Validates that current pass schedule defined by `self.passes` is valid
        according to all constraints in `self.constraints`
        """
        if self._validated:
            return
        for constraint in self.constraints:
            _validate_pass_schedule_constraint(constraint, self.passes)
        self._validated = True

    def __call__(self, source):
        """Validate the schedule, then thread ``source`` through every pass."""
        self.validate()
        out = source
        for _pass in self.passes:
            out = _pass(out)
        return out
|
PassManager
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.