language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/utils/data/datapipes/iter/combining.py | {
"start": 4137,
"end": 4691
} | class ____(ABC):
r"""Abstract class for container ``DataPipes``. The followings are three required methods."""
@abstractmethod
def get_next_element_by_instance(self, instance_id: int): ...
@abstractmethod
def is_every_instance_exhausted(self) -> bool: ...
@abstractmethod
def reset(self) -> None: ...
@abstractmethod
def get_length_by_instance(self, instance_id: int):
r"""Raise TypeError if it's not supposed to be implemented to support `list(datapipe)`."""
def _no_op(x):
return x
| _ContainerTemplate |
python | davidhalter__parso | parso/python/errors.py | {
"start": 29420,
"end": 30587
} | class ____(SyntaxRule):
# True: int
# {}: float
message = "illegal target for annotation"
def get_node(self, node):
return node.parent
def is_issue(self, node):
type_ = None
lhs = node.parent.children[0]
lhs = _remove_parens(lhs)
try:
children = lhs.children
except AttributeError:
pass
else:
if ',' in children or lhs.type == 'atom' and children[0] == '(':
type_ = 'tuple'
elif lhs.type == 'atom' and children[0] == '[':
type_ = 'list'
trailer = children[-1]
if type_ is None:
if not (lhs.type == 'name'
# subscript/attributes are allowed
or lhs.type in ('atom_expr', 'power')
and trailer.type == 'trailer'
and trailer.children[0] != '('):
return True
else:
# x, y: str
message = "only single target (not %s) can be annotated"
self.add_issue(lhs.parent, message=message % type_)
@ErrorFinder.register_rule(type='argument')
| _AnnotatorRule |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc.py | {
"start": 89716,
"end": 94884
} | class ____:
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
version = 6
parameters = {}
op = DataprocInstantiateWorkflowTemplateOperator(
task_id=TASK_ID,
template_id=TEMPLATE_ID,
region=GCP_REGION,
project_id=GCP_PROJECT,
version=version,
parameters=parameters,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=MagicMock())
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.instantiate_workflow_template.assert_called_once_with(
template_name=TEMPLATE_ID,
region=GCP_REGION,
project_id=GCP_PROJECT,
version=version,
parameters=parameters,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
@mock.patch(DATAPROC_TRIGGERS_PATH.format("DataprocAsyncHook"))
def test_execute_call_defer_method(self, mock_trigger_hook, mock_hook):
operator = DataprocInstantiateWorkflowTemplateOperator(
task_id=TASK_ID,
template_id=TEMPLATE_ID,
region=GCP_REGION,
project_id=GCP_PROJECT,
version=2,
parameters={},
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
operator.execute(mock.MagicMock())
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.instantiate_workflow_template.assert_called_once()
mock_hook.return_value.wait_for_operation.assert_not_called()
assert isinstance(exc.value.trigger, DataprocOperationTrigger)
assert exc.value.method_name == GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_on_kill(self, mock_hook):
operation_name = "operation_name"
mock_hook.return_value.instantiate_workflow_template.return_value.operation.name = operation_name
op = DataprocInstantiateWorkflowTemplateOperator(
task_id=TASK_ID,
template_id=TEMPLATE_ID,
region=GCP_REGION,
project_id=GCP_PROJECT,
version=2,
parameters={},
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
cancel_on_kill=False,
)
op.execute(context=mock.MagicMock())
op.on_kill()
mock_hook.return_value.get_operations_client.return_value.cancel_operation.assert_not_called()
op.cancel_on_kill = True
op.on_kill()
mock_hook.return_value.get_operations_client.return_value.cancel_operation.assert_called_once_with(
name=operation_name
)
@pytest.mark.db_test
@pytest.mark.need_serialized_dag
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_instantiate_workflow_operator_extra_links(
mock_hook, dag_maker, create_task_instance_of_operator, mock_supervisor_comms
):
mock_hook.return_value.project_id = GCP_PROJECT
ti = create_task_instance_of_operator(
DataprocInstantiateWorkflowTemplateOperator,
dag_id=TEST_DAG_ID,
task_id=TASK_ID,
region=GCP_REGION,
project_id=GCP_PROJECT,
template_id=TEMPLATE_ID,
gcp_conn_id=GCP_CONN_ID,
)
serialized_dag = dag_maker.get_serialized_data()
# Assert operator links for serialized DAG
deserialized_dag = SerializedDAG.deserialize_dag(serialized_dag["dag"])
operator_extra_link = deserialized_dag.tasks[0].operator_extra_links[0]
assert operator_extra_link.name == "Dataproc Workflow"
if AIRFLOW_V_3_0_PLUS:
mock_supervisor_comms.send.return_value = XComResult(
key="dataproc_workflow",
value="",
)
# Assert operator link is empty when no XCom push occurred
assert ti.task.operator_extra_links[0].get_link(operator=ti.task, ti_key=ti.key) == ""
ti.xcom_push(key="dataproc_workflow", value=DATAPROC_WORKFLOW_EXPECTED)
if AIRFLOW_V_3_0_PLUS:
mock_supervisor_comms.send.return_value = XComResult(
key="dataproc_workflow",
value=DATAPROC_WORKFLOW_EXPECTED,
)
# Assert operator links after execution
assert (
ti.task.operator_extra_links[0].get_link(operator=ti.task, ti_key=ti.key)
== DATAPROC_WORKFLOW_LINK_EXPECTED
)
| TestDataprocInstantiateWorkflowTemplateOperator |
python | pytorch__pytorch | tools/stats/upload_metrics.py | {
"start": 568,
"end": 5833
} | class ____:
name: str
env_var: str
required: bool = True
# Used to cast the value of the env_var to the correct type (defaults to str)
type_conversion_fn: Any = None
def __init__(
self,
name: str,
env_var: str,
required: bool = True,
type_conversion_fn: Any = None,
) -> None:
self.name = name
self.env_var = env_var
self.required = required
self.type_conversion_fn = type_conversion_fn
def value(self) -> Any:
value = os.environ.get(self.env_var)
# Github CI will set some env vars to an empty string
DEFAULT_ENVVAR_VALUES = [None, ""]
if value in DEFAULT_ENVVAR_VALUES:
if not self.required:
return None
raise ValueError(
f"Missing {self.name}. Please set the {self.env_var} "
"environment variable to pass in this value."
)
if self.type_conversion_fn:
return self.type_conversion_fn(value)
return value
global_metrics: dict[str, Any] = {}
def add_global_metric(metric_name: str, metric_value: Any) -> None:
"""
Adds stats that should be emitted with every metric by the current process.
If the emit_metrics method specifies a metric with the same name, it will
overwrite this value.
"""
global_metrics[metric_name] = metric_value
def emit_metric(
metric_name: str,
metrics: dict[str, Any],
) -> None:
"""
Upload a metric to DynamoDB (and from there, the HUD backend database).
Even if EMIT_METRICS is set to False, this function will still run the code to
validate and shape the metrics, skipping just the upload.
Parameters:
metric_name:
Name of the metric. Every unique metric should have a different name
and be emitted just once per run attempt.
Metrics are namespaced by their module and the function that emitted them.
metrics: The actual data to record.
Some default values are populated from environment variables, which must be set
for metrics to be emitted. (If they're not set, this function becomes a noop):
"""
if metrics is None:
raise ValueError("You didn't ask to upload any metrics!")
# Merge the given metrics with the global metrics, overwriting any duplicates
# with the given metrics.
metrics = {**global_metrics, **metrics}
# We use these env vars that to determine basic info about the workflow run.
# By using env vars, we don't have to pass this info around to every function.
# It also helps ensure that we only emit metrics during CI
env_var_metrics = [
EnvVarMetric("repo", "GITHUB_REPOSITORY"),
EnvVarMetric("workflow", "GITHUB_WORKFLOW"),
EnvVarMetric("build_environment", "BUILD_ENVIRONMENT", required=False),
EnvVarMetric("job", "GITHUB_JOB"),
EnvVarMetric("test_config", "TEST_CONFIG", required=False),
EnvVarMetric("pr_number", "PR_NUMBER", required=False, type_conversion_fn=int),
EnvVarMetric("run_id", "GITHUB_RUN_ID", type_conversion_fn=int),
EnvVarMetric("run_number", "GITHUB_RUN_NUMBER", type_conversion_fn=int),
EnvVarMetric("run_attempt", "GITHUB_RUN_ATTEMPT", type_conversion_fn=int),
EnvVarMetric("job_id", "JOB_ID", type_conversion_fn=int),
EnvVarMetric("job_name", "JOB_NAME"),
]
# Use info about the function that invoked this one as a namespace and a way to filter metrics.
calling_frame = inspect.currentframe().f_back # type: ignore[union-attr]
calling_frame_info = inspect.getframeinfo(calling_frame) # type: ignore[arg-type]
calling_file = os.path.basename(calling_frame_info.filename)
calling_module = inspect.getmodule(calling_frame).__name__ # type: ignore[union-attr]
calling_function = calling_frame_info.function
try:
default_metrics = {
"metric_name": metric_name,
"calling_file": calling_file,
"calling_module": calling_module,
"calling_function": calling_function,
"timestamp": datetime.datetime.now(timezone.utc).strftime(
"%Y-%m-%d %H:%M:%S.%f"
),
**{m.name: m.value() for m in env_var_metrics if m.value()},
}
except ValueError as e:
warn(f"Not emitting metrics for {metric_name}. {e}")
return
# Prefix key with metric name and timestamp to derisk chance of a uuid1 name collision
s3_key = f"{metric_name}_{int(time.time())}_{uuid.uuid1().hex}"
if EMIT_METRICS:
try:
upload_to_s3(
bucket_name="ossci-raw-job-status",
key=f"ossci_uploaded_metrics/{s3_key}",
docs=[{**default_metrics, "info": metrics}],
)
except Exception as e:
# We don't want to fail the job if we can't upload the metric.
# We still raise the ValueErrors outside this try block since those indicate improperly configured metrics
warn(f"Error uploading metric {metric_name} to DynamoDB: {e}")
return
else:
print(f"Not emitting metrics for {metric_name}. Boto wasn't imported.")
| EnvVarMetric |
python | ray-project__ray | python/ray/tune/examples/custom_checkpointing_with_callback.py | {
"start": 588,
"end": 4060
} | class ____(Callback):
"""Custom callback that triggers checkpointing by updating the result dict.
This callback demonstrates checkpointing logic beyond
simple periodic checkpointing. It checkpoints based on performance improvements
or when the loss becomes unstable.
Args:
checkpoint_on_improvement: Checkpoint when loss improves significantly
checkpoint_on_instability: Checkpoint when loss becomes unstable
"""
def __init__(
self,
*,
checkpoint_on_improvement: bool = True,
checkpoint_on_instability: bool = True,
):
self.checkpoint_on_improvement = checkpoint_on_improvement
self.checkpoint_on_instability = checkpoint_on_instability
self.best_loss_per_trial = {}
self.recent_losses_per_trial = {}
def on_trial_result(self, iteration, trials, trial, result, **info):
"""Called after receiving a result from the trainable.
This hook implements intelligent checkpointing logic:
1. Checkpoint when we see significant improvement
2. Checkpoint when loss becomes unstable (variance increases)
3. Always checkpoint at specific milestones (every 10 steps)
"""
trial_id = trial.trial_id
current_loss = result.get("mean_loss", float("inf"))
current_step = result.get("iterations", 0)
# Initialize tracking for this trial
if trial_id not in self.best_loss_per_trial:
self.best_loss_per_trial[trial_id] = float("inf")
self.recent_losses_per_trial[trial_id] = []
should_checkpoint = False
reason = ""
# 1. Checkpoint every 10 steps as a baseline
if current_step > 0 and current_step % 10 == 0:
should_checkpoint = True
reason = f"milestone at step {current_step}"
# 2. Checkpoint on significant improvement
if self.checkpoint_on_improvement:
if (
current_loss < self.best_loss_per_trial[trial_id] * 0.9
): # 10% improvement
should_checkpoint = True
reason = f"significant improvement: {current_loss:.4f} < {self.best_loss_per_trial[trial_id]:.4f}"
self.best_loss_per_trial[trial_id] = current_loss
# 3. Checkpoint on instability (high variance in recent losses)
if self.checkpoint_on_instability and current_step > 5:
recent_losses = self.recent_losses_per_trial[trial_id]
recent_losses.append(current_loss)
if len(recent_losses) > 5:
recent_losses.pop(0) # Keep only last 5 losses
if len(recent_losses) == 5:
variance = (
sum((x - sum(recent_losses) / 5) ** 2 for x in recent_losses) / 5
)
if variance > 0.1: # High variance threshold
should_checkpoint = True
reason = f"instability detected: variance={variance:.4f}"
else:
# Track recent losses
recent_losses = self.recent_losses_per_trial[trial_id]
recent_losses.append(current_loss)
if len(recent_losses) > 5:
recent_losses.pop(0)
if should_checkpoint:
print(
f"Callback requesting checkpoint for trial {trial_id} at step {current_step}: {reason}"
)
result[SHOULD_CHECKPOINT] = True
| SmartCheckpointCallback |
python | tiangolo__fastapi | tests/test_response_model_data_filter.py | {
"start": 237,
"end": 288
} | class ____(UserBase):
hashed_password: str
| UserDB |
python | fastapi__sqlmodel | docs_src/tutorial/update/tutorial001_py310.py | {
"start": 71,
"end": 1753
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def update_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Boy")
results = session.exec(statement)
hero = results.one()
print("Hero:", hero)
hero.age = 16
session.add(hero)
session.commit()
session.refresh(hero)
print("Updated hero:", hero)
def main():
create_db_and_tables()
create_heroes()
update_heroes()
if __name__ == "__main__":
main()
| Hero |
python | huggingface__transformers | src/transformers/models/qwen3/modeling_qwen3.py | {
"start": 15588,
"end": 16129
} | class ____(PreTrainedModel):
config: Qwen3Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen3DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Qwen3DecoderLayer,
"attentions": Qwen3Attention,
}
@auto_docstring
| Qwen3PreTrainedModel |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/record/__init__.py | {
"start": 17407,
"end": 22576
} | class ____:
"""Object that allows us to just-in-time compile a checked __new__ implementation on first use.
This has two benefits:
1. Defer processing ForwardRefs until their definitions are in scope.
2. Avoid up-front cost for unused objects.
"""
__name__ = _CHECKED_NEW
def __init__(
self,
field_set: Mapping[str, type],
defaults: Mapping[str, Any],
eval_ctx: EvalContext,
new_frames: int,
kw_only: bool,
):
self._field_set = field_set
self._defaults = defaults
self._eval_ctx = eval_ctx
self._new_frames = new_frames # how many frames of __new__ there are
self._compiled = False
self._kw_only = kw_only
def __call__(self, cls, *args, **kwargs):
if _do_defensive_checks():
# this condition can happen during races in threaded envs so only
# invariant when opted-in
check.invariant(
self._compiled is False,
"failed to set compiled __new__ appropriately",
)
# update the context with callsite locals/globals to resolve
# ForwardRefs that were unavailable at definition time.
self._eval_ctx.update_from_frame(1 + self._new_frames)
# we are double-memoizing this to handle some confusing mro issues
# in which the _nt_base's __new__ method is not on the critical
# path, causing this to get invoked multiple times
compiled_fn = self._eval_ctx.compile_fn(
self._build_checked_new_str(),
_CHECKED_NEW,
)
self._compiled = True
# replace this holder object with the compiled fn by finding where it was in the hierarchy
for c in cls.__mro__:
if c.__new__ is self:
c.__new__ = compiled_fn
return compiled_fn(cls, *args, **kwargs)
def _build_checked_new_str(self) -> str:
args_str, set_calls_str = build_args_and_assignment_strs(
self._field_set.keys(),
self._defaults,
self._kw_only,
)
check_calls = []
for name, ttype in self._field_set.items():
call_str = build_check_call_str(
ttype=ttype,
name=name,
eval_ctx=self._eval_ctx,
)
check_calls.append(f"{name}={call_str}")
check_call_block = ",\n ".join(check_calls)
lazy_imports_str = "\n ".join(
f"from {module} import {t}" for t, module in self._eval_ctx.lazy_imports.items()
)
checked_new_str = f"""
def __checked_new__(cls{args_str}):
{lazy_imports_str}
{set_calls_str}
return cls.{_NAMED_TUPLE_BASE_NEW_FIELD}(
cls,
{check_call_block}
)
"""
return checked_new_str
def _build_defaults_new(
field_set: Mapping[str, type],
defaults: Mapping[str, Any],
kw_only: bool,
) -> str:
"""Build a __new__ implementation that handles default values."""
kw_args_str, set_calls_str = build_args_and_assignment_strs(
field_set,
defaults,
kw_only,
)
assign_str = ",\n ".join([f"{name}={name}" for name in field_set.keys()])
return f"""
def __defaults_new__(cls{kw_args_str}):
{set_calls_str}
return cls.{_NAMED_TUPLE_BASE_NEW_FIELD}(
cls,
{assign_str}
)
"""
def _banned_iter(self, *args, **kwargs):
raise Exception(f"Iteration is not allowed on `@record` {self.__class__.__name__}.")
def _banned_idx(self, *args, **kwargs):
raise Exception(f"Index access is not allowed on `@record` {self.__class__.__name__}.")
def _true(_):
return True
def _from_reduce(cls, kwargs):
# loading from pickle bypasses checked / custom __new__ and
# just reconstructs the base namedtuple
return getattr(cls, _NAMED_TUPLE_BASE_NEW_FIELD)(
cls,
**kwargs,
)
def _reduce(self):
# pickle support
return _from_reduce, (self.__class__, as_dict(self))
def _repr(self) -> str:
# the __repr__ method generated for namedtuples cannot handle subclasses that have different
# sets of fields
field_set = getattr(self, _RECORD_ANNOTATIONS_FIELD)
values = [f"{field_name}={getattr(self, field_name)!r}" for field_name in field_set]
return f"{self.__class__.__name__}({', '.join(values)})"
def _defines_own_new(cls) -> bool:
qualname = getattr(cls.__new__, "__qualname__", None)
if not qualname:
return False
qualname_parts = cls.__new__.__qualname__.split(".")
if len(qualname_parts) < 2:
return False
return qualname_parts[-2] == cls.__name__
def _do_defensive_checks():
return bool(os.getenv("DAGSTER_RECORD_DEFENSIVE_CHECKS"))
@classmethod
def _pydantic_core_schema(cls, source: Any, handler):
"""Forces pydantic_core to treat records as normal types instead of namedtuples. In particular,
pydantic assumes that all namedtuples can be constructed with posargs, while records are kw-only.
"""
from pydantic_core import core_schema
return core_schema.is_instance_schema(cls)
| JitCheckedNew |
python | tqdm__tqdm | tqdm/std.py | {
"start": 3938,
"end": 6939
} | class ____(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(chr, range(0x258F, 0x2587, -1)))
BLANK = " "
COLOUR_RESET = '\x1b[0m'
COLOUR_RGB = '\x1b[38;2;%d;%d;%dm'
COLOURS = {'BLACK': '\x1b[30m', 'RED': '\x1b[31m', 'GREEN': '\x1b[32m',
'YELLOW': '\x1b[33m', 'BLUE': '\x1b[34m', 'MAGENTA': '\x1b[35m',
'CYAN': '\x1b[36m', 'WHITE': '\x1b[37m'}
def __init__(self, frac, default_len=10, charset=UTF, colour=None):
if not 0 <= frac <= 1:
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
self.colour = colour
@property
def colour(self):
return self._colour
@colour.setter
def colour(self, value):
if not value:
self._colour = None
return
try:
if value.upper() in self.COLOURS:
self._colour = self.COLOURS[value.upper()]
elif value[0] == '#' and len(value) == 7:
self._colour = self.COLOUR_RGB % tuple(
int(i, 16) for i in (value[1:3], value[3:5], value[5:7]))
else:
raise KeyError
except (KeyError, AttributeError):
warn("Unknown colour (%s); valid choices: [hex (#00ff00), %s]" % (
value, ", ".join(self.COLOURS)),
TqdmWarning, stacklevel=2)
self._colour = None
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = {'a': self.ASCII, 'u': self.UTF, 'b': self.BLANK}[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(int(self.frac * N_BARS * nsyms), nsyms)
res = charset[-1] * bar_length
if bar_length < N_BARS: # whitespace padding
res = res + charset[frac_bar_length] + charset[0] * (N_BARS - bar_length - 1)
return self.colour + res + self.COLOUR_RESET if self.colour else res
| Bar |
python | Netflix__metaflow | metaflow/plugins/kubernetes/kubernetes_jobsets.py | {
"start": 356,
"end": 8510
} | class ____(MetaflowException):
headline = "Kubernetes jobset error"
# TODO [DUPLICATE CODE]: Refactor this method to a separate file so that
# It can be used by both KubernetesJob and KubernetesJobset
def k8s_retry(deadline_seconds=60, max_backoff=32):
def decorator(function):
from functools import wraps
@wraps(function)
def wrapper(*args, **kwargs):
from kubernetes import client
deadline = time.time() + deadline_seconds
retry_number = 0
while True:
try:
result = function(*args, **kwargs)
return result
except client.rest.ApiException as e:
if e.status == 500:
current_t = time.time()
backoff_delay = min(
math.pow(2, retry_number) + random.random(), max_backoff
)
if current_t + backoff_delay < deadline:
time.sleep(backoff_delay)
retry_number += 1
continue # retry again
else:
raise
else:
raise
return wrapper
return decorator
JobsetStatus = namedtuple(
"JobsetStatus",
[
"control_pod_failed", # boolean
"control_exit_code",
"control_pod_status", # string like (<pod-status>):(<container-status>) [used for user-messaging]
"control_started",
"control_completed",
"worker_pods_failed",
"workers_are_suspended",
"workers_have_started",
"all_jobs_are_suspended",
"jobset_finished",
"jobset_failed",
"status_unknown",
"jobset_was_terminated",
"some_jobs_are_running",
],
)
def _basic_validation_for_js(jobset):
if not jobset.get("status") or not _retrieve_replicated_job_statuses(jobset):
return False
worker_jobs = [
w for w in jobset.get("spec").get("replicatedJobs") if w["name"] == "worker"
]
if len(worker_jobs) == 0:
raise KubernetesJobsetException("No worker jobs found in the jobset manifest")
control_job = [
w for w in jobset.get("spec").get("replicatedJobs") if w["name"] == "control"
]
if len(control_job) == 0:
raise KubernetesJobsetException("No control job found in the jobset manifest")
return True
def _derive_pod_status_and_status_code(control_pod):
overall_status = None
control_exit_code = None
control_pod_failed = False
if control_pod:
container_status = None
pod_status = control_pod.get("status", {}).get("phase")
container_statuses = control_pod.get("status", {}).get("containerStatuses")
if container_statuses is None:
container_status = ": ".join(
filter(
None,
[
control_pod.get("status", {}).get("reason"),
control_pod.get("status", {}).get("message"),
],
)
)
else:
for k, v in container_statuses[0].get("state", {}).items():
if v is not None:
control_exit_code = v.get("exit_code")
container_status = ": ".join(
filter(
None,
[v.get("reason"), v.get("message")],
)
)
if container_status is None:
overall_status = "pod status: %s | container status: %s" % (
pod_status,
container_status,
)
else:
overall_status = "pod status: %s" % pod_status
if pod_status == "Failed":
control_pod_failed = True
return overall_status, control_exit_code, control_pod_failed
def _retrieve_replicated_job_statuses(jobset):
# We needed this abstraction because Jobsets changed thier schema
# in version v0.3.0 where `ReplicatedJobsStatus` became `replicatedJobsStatus`
# So to handle users having an older version of jobsets, we need to account
# for both the schemas.
if jobset.get("status", {}).get("replicatedJobsStatus", None):
return jobset.get("status").get("replicatedJobsStatus")
elif jobset.get("status", {}).get("ReplicatedJobsStatus", None):
return jobset.get("status").get("ReplicatedJobsStatus")
return None
def _construct_jobset_logical_status(jobset, control_pod=None):
if not _basic_validation_for_js(jobset):
return JobsetStatus(
control_started=False,
control_completed=False,
workers_are_suspended=False,
workers_have_started=False,
all_jobs_are_suspended=False,
jobset_finished=False,
jobset_failed=False,
status_unknown=True,
jobset_was_terminated=False,
control_exit_code=None,
control_pod_status=None,
worker_pods_failed=False,
control_pod_failed=False,
some_jobs_are_running=False,
)
js_status = jobset.get("status")
control_started = False
control_completed = False
workers_are_suspended = False
workers_have_started = False
all_jobs_are_suspended = jobset.get("spec", {}).get("suspend", False)
jobset_finished = False
jobset_failed = False
status_unknown = False
jobset_was_terminated = False
worker_pods_failed = False
some_jobs_are_running = False
total_worker_jobs = [
w["replicas"]
for w in jobset.get("spec").get("replicatedJobs", [])
if w["name"] == "worker"
][0]
total_control_jobs = [
w["replicas"]
for w in jobset.get("spec").get("replicatedJobs", [])
if w["name"] == "control"
][0]
if total_worker_jobs == 0 and total_control_jobs == 0:
jobset_was_terminated = True
replicated_job_statuses = _retrieve_replicated_job_statuses(jobset)
for job_status in replicated_job_statuses:
if job_status["active"] > 0:
some_jobs_are_running = True
if job_status["name"] == "control":
control_started = job_status["active"] > 0 or job_status["succeeded"] > 0
control_completed = job_status["succeeded"] > 0
if job_status["failed"] > 0:
jobset_failed = True
if job_status["name"] == "worker":
workers_have_started = job_status["active"] == total_worker_jobs
if "suspended" in job_status:
# `replicatedJobStatus` didn't have `suspend` field
# until v0.3.0. So we need to account for that.
workers_are_suspended = job_status["suspended"] > 0
if job_status["failed"] > 0:
worker_pods_failed = True
jobset_failed = True
if js_status.get("conditions"):
for condition in js_status["conditions"]:
if condition["type"] == "Completed":
jobset_finished = True
if condition["type"] == "Failed":
jobset_failed = True
(
overall_status,
control_exit_code,
control_pod_failed,
) = _derive_pod_status_and_status_code(control_pod)
return JobsetStatus(
control_started=control_started,
control_completed=control_completed,
workers_are_suspended=workers_are_suspended,
workers_have_started=workers_have_started,
all_jobs_are_suspended=all_jobs_are_suspended,
jobset_finished=jobset_finished,
jobset_failed=jobset_failed,
status_unknown=status_unknown,
jobset_was_terminated=jobset_was_terminated,
control_exit_code=control_exit_code,
control_pod_status=overall_status,
worker_pods_failed=worker_pods_failed,
control_pod_failed=control_pod_failed,
some_jobs_are_running=some_jobs_are_running,
)
| KubernetesJobsetException |
python | python-pillow__Pillow | src/PIL/SpiderImagePlugin.py | {
"start": 2541,
"end": 10249
} | class ____(ImageFile.ImageFile):
format = "SPIDER"
format_description = "Spider 2D image"
_close_exclusive_fp_after_loading = False
def _open(self) -> None:
# check header
n = 27 * 4 # read 27 float values
f = self.fp.read(n)
try:
self.bigendian = 1
t = struct.unpack(">27f", f) # try big-endian first
hdrlen = isSpiderHeader(t)
if hdrlen == 0:
self.bigendian = 0
t = struct.unpack("<27f", f) # little-endian
hdrlen = isSpiderHeader(t)
if hdrlen == 0:
msg = "not a valid Spider file"
raise SyntaxError(msg)
except struct.error as e:
msg = "not a valid Spider file"
raise SyntaxError(msg) from e
h = (99,) + t # add 1 value : spider header index starts at 1
iform = int(h[5])
if iform != 1:
msg = "not a Spider 2D image"
raise SyntaxError(msg)
self._size = int(h[12]), int(h[2]) # size in pixels (width, height)
self.istack = int(h[24])
self.imgnumber = int(h[27])
if self.istack == 0 and self.imgnumber == 0:
# stk=0, img=0: a regular 2D image
offset = hdrlen
self._nimages = 1
elif self.istack > 0 and self.imgnumber == 0:
# stk>0, img=0: Opening the stack for the first time
self.imgbytes = int(h[12]) * int(h[2]) * 4
self.hdrlen = hdrlen
self._nimages = int(h[26])
# Point to the first image in the stack
offset = hdrlen * 2
self.imgnumber = 1
elif self.istack == 0 and self.imgnumber > 0:
# stk=0, img>0: an image within the stack
offset = hdrlen + self.stkoffset
self.istack = 2 # So Image knows it's still a stack
else:
msg = "inconsistent stack header values"
raise SyntaxError(msg)
if self.bigendian:
self.rawmode = "F;32BF"
else:
self.rawmode = "F;32F"
self._mode = "F"
self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, offset, self.rawmode)]
self._fp = self.fp # FIXME: hack
@property
def n_frames(self) -> int:
return self._nimages
@property
def is_animated(self) -> bool:
return self._nimages > 1
# 1st image index is zero (although SPIDER imgnumber starts at 1)
def tell(self) -> int:
if self.imgnumber < 1:
return 0
else:
return self.imgnumber - 1
def seek(self, frame: int) -> None:
if self.istack == 0:
msg = "attempt to seek in a non-stack file"
raise EOFError(msg)
if not self._seek_check(frame):
return
if isinstance(self._fp, DeferredError):
raise self._fp.ex
self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes)
self.fp = self._fp
self.fp.seek(self.stkoffset)
self._open()
# returns a byte image after rescaling to 0..255
def convert2byte(self, depth: int = 255) -> Image.Image:
extrema = self.getextrema()
assert isinstance(extrema[0], float)
minimum, maximum = cast(tuple[float, float], extrema)
m: float = 1
if maximum != minimum:
m = depth / (maximum - minimum)
b = -m * minimum
return self.point(lambda i: i * m + b).convert("L")
if TYPE_CHECKING:
from . import ImageTk
# returns a ImageTk.PhotoImage object, after rescaling to 0..255
def tkPhotoImage(self) -> ImageTk.PhotoImage:
from . import ImageTk
return ImageTk.PhotoImage(self.convert2byte(), palette=256)
# --------------------------------------------------------------------
# Image series
# given a list of filenames, return a list of images
def loadImageSeries(filelist: list[str] | None = None) -> list[Image.Image] | None:
"""create a list of :py:class:`~PIL.Image.Image` objects for use in a montage"""
if filelist is None or len(filelist) < 1:
return None
byte_imgs = []
for img in filelist:
if not os.path.exists(img):
print(f"unable to find {img}")
continue
try:
with Image.open(img) as im:
assert isinstance(im, SpiderImageFile)
byte_im = im.convert2byte()
except Exception:
if not isSpiderImage(img):
print(f"{img} is not a Spider image file")
continue
byte_im.info["filename"] = img
byte_imgs.append(byte_im)
return byte_imgs
# --------------------------------------------------------------------
# For saving images in Spider format
def makeSpiderHeader(im: Image.Image) -> list[bytes]:
nsam, nrow = im.size
lenbyt = nsam * 4 # There are labrec records in the header
labrec = int(1024 / lenbyt)
if 1024 % lenbyt != 0:
labrec += 1
labbyt = labrec * lenbyt
nvalues = int(labbyt / 4)
if nvalues < 23:
return []
hdr = [0.0] * nvalues
# NB these are Fortran indices
hdr[1] = 1.0 # nslice (=1 for an image)
hdr[2] = float(nrow) # number of rows per slice
hdr[3] = float(nrow) # number of records in the image
hdr[5] = 1.0 # iform for 2D image
hdr[12] = float(nsam) # number of pixels per line
hdr[13] = float(labrec) # number of records in file header
hdr[22] = float(labbyt) # total number of bytes in header
hdr[23] = float(lenbyt) # record length in bytes
# adjust for Fortran indexing
hdr = hdr[1:]
hdr.append(0.0)
# pack binary data into a string
return [struct.pack("f", v) for v in hdr]
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if im.mode != "F":
im = im.convert("F")
hdr = makeSpiderHeader(im)
if len(hdr) < 256:
msg = "Error creating Spider header"
raise OSError(msg)
# write the SPIDER header
fp.writelines(hdr)
rawmode = "F;32NF" # 32-bit native floating point
ImageFile._save(im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, rawmode)])
def _save_spider(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
# get the filename extension and register it with Image
filename_ext = os.path.splitext(filename)[1]
ext = filename_ext.decode() if isinstance(filename_ext, bytes) else filename_ext
Image.register_extension(SpiderImageFile.format, ext)
_save(im, fp, filename)
# --------------------------------------------------------------------
Image.register_open(SpiderImageFile.format, SpiderImageFile)
Image.register_save(SpiderImageFile.format, _save_spider)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Syntax: python3 SpiderImagePlugin.py [infile] [outfile]")
sys.exit()
filename = sys.argv[1]
if not isSpiderImage(filename):
print("input image must be in Spider format")
sys.exit()
with Image.open(filename) as im:
print(f"image: {im}")
print(f"format: {im.format}")
print(f"size: {im.size}")
print(f"mode: {im.mode}")
print("max, min: ", end=" ")
print(im.getextrema())
if len(sys.argv) > 2:
outfile = sys.argv[2]
# perform some image operation
im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
print(
f"saving a flipped version of {os.path.basename(filename)} "
f"as {outfile} "
)
im.save(outfile, SpiderImageFile.format)
| SpiderImageFile |
python | getsentry__sentry | src/sentry/eventstream/kafka/dispatch.py | {
"start": 3387,
"end": 3768
} | class ____(PostProcessForwarderStrategyFactory):
@staticmethod
def _dispatch_function(
message: Message[KafkaPayload], eventstream_type: str | None = None
) -> None:
with _sampled_eventstream_timer(instance="_get_task_kwargs_and_dispatch"):
return _get_task_kwargs_and_dispatch(message, eventstream_type)
| EventPostProcessForwarderStrategyFactory |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py | {
"start": 3064,
"end": 3355
} | class ____(ResolutionError):
def __init__(self, round_count):
super(ResolutionTooDeep, self).__init__(round_count)
self.round_count = round_count
# Resolution state in a round.
State = collections.namedtuple("State", "mapping criteria backtrack_causes")
| ResolutionTooDeep |
python | gevent__gevent | src/greentest/3.14/test_smtplib.py | {
"start": 54166,
"end": 59143
} | class ____(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received /r/n to /n, so we can't easily test that
# we are successfully sending /r/n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
| SMTPUTF8SimTests |
python | dask__distributed | distributed/shuffle/tests/test_shuffle.py | {
"start": 15636,
"end": 23231
} | class ____(_ShuffleRunManager):
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.in_get_or_create = asyncio.Event()
self.block_get_or_create = asyncio.Event()
async def get_or_create(self, *args: Any, **kwargs: Any) -> ShuffleRun:
self.in_get_or_create.set()
await self.block_get_or_create.wait()
return await super().get_or_create(*args, **kwargs)
@mock.patch(
"distributed.shuffle._worker_plugin._ShuffleRunManager",
BlockedGetOrCreateShuffleRunManager,
)
@gen_cluster(
client=True,
nthreads=[("", 1)] * 2,
config={"distributed.scheduler.allowed-failures": 0},
)
async def test_get_or_create_from_dangling_transfer(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.compute(out.x.size)
shuffle_extA = a.plugins["shuffle"]
shuffle_extB = b.plugins["shuffle"]
shuffle_extB.shuffle_runs.block_get_or_create.set()
await shuffle_extA.shuffle_runs.in_get_or_create.wait()
await assert_worker_cleanup(b, close=True)
await async_poll_for(
lambda: not any(ws.processing for ws in s.workers.values()), timeout=5
)
with pytest.raises(KilledWorker):
await out
await async_poll_for(lambda: not s.plugins["shuffle"].active_shuffles, timeout=5)
assert a.state.tasks
shuffle_extA.shuffle_runs.block_get_or_create.set()
await async_poll_for(lambda: not a.state.tasks, timeout=10)
assert not s.plugins["shuffle"].active_shuffles
await assert_worker_cleanup(a)
await c.close()
await assert_scheduler_cleanup(s)
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_crashed_worker_during_transfer(c, s, a):
async with (
Nanny(s.address, nthreads=1) as n,
wait_until_worker_has_tasks(
"shuffle-transfer", n.worker_address, 1, s
) as event,
):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = df.shuffle("x", force=True)
fut = c.compute([shuffled, df], sync=True)
await event.wait()
await n.process.process.kill()
result, expected = await fut
dd.assert_eq(result, expected)
await c.close()
await assert_worker_cleanup(a)
await assert_scheduler_cleanup(s)
@gen_cluster(
client=True,
nthreads=[],
# Effectively disable the memory monitor to be able to manually control
# the worker status
config={"distributed.worker.memory.monitor-interval": "60s"},
)
async def test_restarting_does_not_deadlock(c, s):
"""Regression test for https://github.com/dask/distributed/issues/8088"""
async with Worker(s.address) as a:
# Ensure that a holds the input tasks to the shuffle
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
df = await c.persist(df)
expected = await c.compute(df)
async with Worker(s.address) as b:
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
assert not s.workers[b.worker_address].has_what
result = c.compute(out)
while not s.extensions["shuffle"].active_shuffles:
await asyncio.sleep(0)
a.status = Status.paused
await async_poll_for(lambda: len(s.running) == 1, timeout=5)
b.batched_stream.close()
await async_poll_for(lambda: not s.running, timeout=5)
a.status = Status.running
await async_poll_for(lambda: s.running, timeout=5)
result = await result
assert dd.assert_eq(result, expected)
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("", 1)] * 2)
async def test_closed_input_only_worker_during_transfer(c, s, a, b):
def mock_get_worker_for_range_sharding(
output_partition: int, workers: list[str], npartitions: int
) -> str:
return a.address
with mock.patch(
"distributed.shuffle._shuffle._get_worker_for_range_sharding",
mock_get_worker_for_range_sharding,
):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-05-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = df.shuffle("x", force=True)
fut = c.compute([shuffled, df], sync=True)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, b, 0.001)
await assert_worker_cleanup(b, close=True)
result, expected = await fut
dd.assert_eq(result, expected)
await c.close()
await assert_worker_cleanup(a)
await assert_scheduler_cleanup(s)
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("", 1)], clean_kwargs={"processes": False})
async def test_crashed_input_only_worker_during_transfer(c, s, a):
def mock_mock_get_worker_for_range_sharding(
output_partition: int, workers: list[str], npartitions: int
) -> str:
return a.address
with mock.patch(
"distributed.shuffle._shuffle._get_worker_for_range_sharding",
mock_mock_get_worker_for_range_sharding,
):
async with (
Nanny(s.address, nthreads=1) as n,
wait_until_worker_has_tasks(
"shuffle-transfer", n.worker_address, 1, s
) as event,
):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = df.shuffle("x", force=True)
fut = c.compute([shuffled, df], sync=True)
await event.wait()
await n.process.process.kill()
result, expected = await fut
dd.assert_eq(result, expected)
await c.close()
await assert_worker_cleanup(a)
await assert_scheduler_cleanup(s)
# @pytest.mark.slow
@gen_cluster(client=True, nthreads=[("", 1)] * 3)
async def test_closed_bystanding_worker_during_shuffle(c, s, w1, w2, w3):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-02-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = df.shuffle("x", force=True)
fut = c.compute(
[shuffled, df],
sync=True,
workers=[w1.address, w2.address],
allow_other_workers=False,
)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, w1)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, w2)
await assert_worker_cleanup(w3, close=True)
result, expected = await fut
dd.assert_eq(result, expected)
await assert_worker_cleanup(w1)
await assert_worker_cleanup(w2)
await assert_scheduler_cleanup(s)
| BlockedGetOrCreateShuffleRunManager |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 91126,
"end": 93643
} | class ____(BaseTest, NewPassManagerMixin):
def test_close(self):
pb = self.pb()
pb.close()
def test_pto(self):
tm = self.target_machine(jit=False)
pto = llvm.create_pipeline_tuning_options(3, 0)
pto.inlining_threshold = 2
pto.loop_interleaving = True
pto.loop_vectorization = True
pto.slp_vectorization = True
pto.loop_unrolling = False
pb = llvm.create_pass_builder(tm, pto)
pb.close()
def test_get_module_pass_manager(self):
pb = self.pb()
mpm = pb.getModulePassManager()
mpm.run(self.module(), pb)
pb.close()
def test_get_function_pass_manager(self):
pb = self.pb()
fpm = pb.getFunctionPassManager()
fpm.run(self.module().get_function("sum"), pb)
pb.close()
def test_time_passes(self):
"""Test pass timing reports for O3 and O0 optimization levels"""
def run_with_timing(speed_level):
mod = self.module()
pb = self.pb(speed_level=speed_level, size_level=0)
pb.start_pass_timing()
mpm = pb.getModulePassManager()
mpm.run(mod, pb)
report = pb.finish_pass_timing()
pb.close()
return report
report_O3 = run_with_timing(3)
report_O0 = run_with_timing(0)
self.assertIsInstance(report_O3, str)
self.assertIsInstance(report_O0, str)
self.assertEqual(report_O3.count("Pass execution timing report"), 1)
self.assertEqual(report_O0.count("Pass execution timing report"), 1)
def test_empty_report(self):
mod = self.module()
pb = self.pb()
mpm = pb.getModulePassManager()
mpm.run(mod, pb)
pb.start_pass_timing()
report = pb.finish_pass_timing()
pb.close()
self.assertFalse(report)
def test_multiple_timers_error(self):
mod = self.module()
pb = self.pb()
pb.start_pass_timing()
mpm = pb.getModulePassManager()
mpm.run(mod, pb)
pb.finish_pass_timing()
with self.assertRaisesRegex(RuntimeError, "only be done once"):
pb.start_pass_timing()
pb.close()
def test_empty_report_error(self):
mod = self.module()
pb = self.pb()
mpm = pb.getModulePassManager()
mpm.run(mod, pb)
with self.assertRaisesRegex(RuntimeError, "not enabled"):
pb.finish_pass_timing()
pb.close()
| TestPassBuilder |
python | django__django | django/forms/fields.py | {
"start": 15976,
"end": 16674
} | class ____(Field):
def __init__(self, *, input_formats=None, **kwargs):
super().__init__(**kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
value = value.strip()
# Try to strptime against each input format.
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages["invalid"], code="invalid")
def strptime(self, value, format):
raise NotImplementedError("Subclasses must define this method.")
| BaseTemporalField |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 6807,
"end": 14167
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
Total loss calculated for this model.
loss_info (`FlavaLosses`):
Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
the keys.
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`].
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
The output of the [`FlavaTextModel`].
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
The output of the [`FlavaMultimodalModel`].
image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
to create masked images.
image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
The output of the [`FlavaTextModel`].
multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
The output of the [`FlavaMultimodalModel`].
mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)` , *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
The logits for MIM unimodal loss. Uses `book_masked_pos` to get masked patches. The flattened output is
returned when `bool_masked_pos` has some of the patches masked.
mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
the tokens masked.
itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
`image_projection` and `text_projection` layers respectively. This represents the image-text similarity
scores. This is calculated on unmasked images and texts.
contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
`text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
texts.
mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape`(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
The logits for MMM image multimodal loss. Uses `book_masked_pos` to get masked patches. The flattened
output is returned when `bool_masked_pos` has some of the patches masked.
mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(`(total_masked_seq_length, text_vocab_size)`), *optional*, returned when `pixel_values` and `input_ids_masked` are present):
The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
some of the tokens masked.
"""
loss: Optional[torch.FloatTensor] = None
loss_info: FlavaLosses = None
image_embeddings: Optional[torch.FloatTensor] = None
image_output: Optional[BaseModelOutputWithPooling] = None
text_embeddings: Optional[torch.FloatTensor] = None
text_output: Optional[BaseModelOutputWithPooling] = None
multimodal_embeddings: Optional[torch.FloatTensor] = None
multimodal_output: Optional[BaseModelOutputWithPooling] = None
image_masked_embeddings: Optional[torch.FloatTensor] = None
image_masked_output: Optional[BaseModelOutputWithPooling] = None
text_masked_embeddings: Optional[torch.FloatTensor] = None
text_masked_output: Optional[BaseModelOutputWithPooling] = None
multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
mim_logits: Optional[torch.FloatTensor] = None
mlm_logits: Optional[torch.FloatTensor] = None
itm_logits: Optional[torch.FloatTensor] = None
contrastive_logits_per_image: Optional[torch.FloatTensor] = None
contrastive_logits_per_text: Optional[torch.FloatTensor] = None
mmm_image_logits: Optional[torch.FloatTensor] = None
mmm_text_logits: Optional[torch.FloatTensor] = None
def to_tuple(self) -> tuple[Any]:
transformer_outputs = [
"text_output",
"image_output",
"multimodal_output",
"text_masked_output",
"image_masked_output",
"multimodal_masked_output",
]
return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
| FlavaForPreTrainingOutput |
python | sympy__sympy | sympy/stats/symbolic_probability.py | {
"start": 19536,
"end": 21311
} | class ____(Expr):
"""
Symbolic class for Moment
Examples
========
>>> from sympy import Symbol, Integral
>>> from sympy.stats import Normal, Expectation, Probability, Moment
>>> mu = Symbol('mu', real=True)
>>> sigma = Symbol('sigma', positive=True)
>>> X = Normal('X', mu, sigma)
>>> M = Moment(X, 3, 1)
To evaluate the result of Moment use `doit`:
>>> M.doit()
mu**3 - 3*mu**2 + 3*mu*sigma**2 + 3*mu - 3*sigma**2 - 1
Rewrite the Moment expression in terms of Expectation:
>>> M.rewrite(Expectation)
Expectation((X - 1)**3)
Rewrite the Moment expression in terms of Probability:
>>> M.rewrite(Probability)
Integral((x - 1)**3*Probability(Eq(X, x)), (x, -oo, oo))
Rewrite the Moment expression in terms of Integral:
>>> M.rewrite(Integral)
Integral(sqrt(2)*(X - 1)**3*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
"""
def __new__(cls, X, n, c=0, condition=None, **kwargs):
X = _sympify(X)
n = _sympify(n)
c = _sympify(c)
if condition is not None:
condition = _sympify(condition)
return super().__new__(cls, X, n, c, condition)
else:
return super().__new__(cls, X, n, c)
def doit(self, **hints):
return self.rewrite(Expectation).doit(**hints)
def _eval_rewrite_as_Expectation(self, X, n, c=0, condition=None, **kwargs):
return Expectation((X - c)**n, condition)
def _eval_rewrite_as_Probability(self, X, n, c=0, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Probability)
def _eval_rewrite_as_Integral(self, X, n, c=0, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Integral)
| Moment |
python | doocs__leetcode | lcp/LCP 63. 弹珠游戏/Solution.py | {
"start": 0,
"end": 1127
} | class ____:
def ballGame(self, num: int, plate: List[str]) -> List[List[int]]:
def check(i, j, d):
k = num
while plate[i][j] != 'O':
if k == 0:
return False
if plate[i][j] == 'W':
d = (d + 3) % 4
elif plate[i][j] == 'E':
d = (d + 1) % 4
i, j = i + dirs[d], j + dirs[d + 1]
if not (0 <= i < m and 0 <= j < n):
return False
k -= 1
return True
dirs = (0, 1, 0, -1, 0)
m, n = len(plate), len(plate[0])
ans = []
for i in range(1, m - 1):
if plate[i][0] == '.' and check(i, 0, 0):
ans.append([i, 0])
if plate[i][n - 1] == '.' and check(i, n - 1, 2):
ans.append([i, n - 1])
for j in range(1, n - 1):
if plate[0][j] == '.' and check(0, j, 1):
ans.append([0, j])
if plate[m - 1][j] == '.' and check(m - 1, j, 3):
ans.append([m - 1, j])
return ans
| Solution |
python | google__pytype | pytype/tools/analyze_project/config_test.py | {
"start": 6066,
"end": 7706
} | class ____(TestBase):
"""Test config.read_config_file_or_die()."""
def test_config_file(self):
with test_utils.Tempdir() as d:
f = d.create_file('test.cfg', PYTYPE_CFG)
conf = config.read_config_file_or_die(f)
self._validate_file_contents(conf, d.path)
def test_missing_config_file_section(self):
with test_utils.Tempdir() as d:
f = d.create_file('test.cfg', RANDOM_CFG)
with self.assertRaises(SystemExit):
config.read_config_file_or_die(f)
def test_setup_cfg(self):
with test_utils.Tempdir() as d:
d.create_file('setup.cfg', SETUP_CFG)
with file_utils.cd(d.path):
conf = config.read_config_file_or_die(None)
self._validate_file_contents(conf, d.path)
def test_setup_cfg_from_subdir(self):
with test_utils.Tempdir() as d:
d.create_file('setup.cfg', SETUP_CFG)
sub = d.create_directory(file_utils.replace_separator('x/y/z'))
with file_utils.cd(sub):
conf = config.read_config_file_or_die(None)
self._validate_file_contents(conf, d.path)
def test_missing_setup_cfg_section(self):
with test_utils.Tempdir() as d:
d.create_file('setup.cfg', RANDOM_CFG)
with file_utils.cd(d.path):
conf = config.read_config_file_or_die(None)
self._validate_empty_contents(conf)
def test_pyproject_toml(self):
with test_utils.Tempdir() as d:
d.create_file('pyproject.toml', PYPROJECT_TOML)
with file_utils.cd(d.path):
conf = config.read_config_file_or_die(None)
self._validate_file_contents(conf, d.path)
if __name__ == '__main__':
unittest.main()
| TestReadConfig |
python | encode__django-rest-framework | tests/authentication/test_authentication.py | {
"start": 822,
"end": 902
} | class ____(TokenAuthentication):
model = CustomToken
| CustomTokenAuthentication |
python | modin-project__modin | modin/tests/pandas/native_df_interoperability/test_compiler_caster.py | {
"start": 7877,
"end": 8042
} | class ____(CalculatorTestQc):
"Represents a query compiler with no costing information"
def get_backend(self):
return "Test_Casting_Default"
| DefaultQC |
python | modin-project__modin | modin/core/io/text/text_file_dispatcher.py | {
"start": 1759,
"end": 46512
} | class ____(FileDispatcher):
"""Class handles utils for reading text formats files."""
@classmethod
def get_path_or_buffer(cls, filepath_or_buffer):
"""
Extract path from `filepath_or_buffer`.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_csv` function.
Returns
-------
str or path object
verified `filepath_or_buffer` parameter.
Notes
-----
Given a buffer, try and extract the filepath from it so that we can
use it without having to fall back to pandas and share file objects between
workers. Given a filepath, return it immediately.
"""
if (
hasattr(filepath_or_buffer, "name")
and hasattr(filepath_or_buffer, "seekable")
and filepath_or_buffer.seekable()
and filepath_or_buffer.tell() == 0
):
buffer_filepath = filepath_or_buffer.name
if cls.file_exists(buffer_filepath):
warnings.warn(
"For performance reasons, the filepath will be "
+ "used in place of the file handle passed in "
+ "to load the data"
)
return cls.get_path(buffer_filepath)
return filepath_or_buffer
@classmethod
def build_partition(cls, partition_ids, row_lengths, column_widths):
"""
Build array with partitions of `cls.frame_partition_cls` class.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
row_lengths : list
Partitions rows lengths.
column_widths : list
Number of columns in each partition.
Returns
-------
np.ndarray
array with shape equals to the shape of `partition_ids` and
filed with partitions objects.
"""
return np.array(
[
[
cls.frame_partition_cls(
partition_ids[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
)
@classmethod
def pathlib_or_pypath(cls, filepath_or_buffer):
"""
Check if `filepath_or_buffer` is instance of `py.path.local` or `pathlib.Path`.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_csv` function.
Returns
-------
bool
Whether or not `filepath_or_buffer` is instance of `py.path.local`
or `pathlib.Path`.
"""
try:
import py
if isinstance(filepath_or_buffer, py.path.local):
return True
except ImportError: # pragma: no cover
pass
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
return True
except ImportError: # pragma: no cover
pass
return False
@classmethod
def offset(
cls,
f,
offset_size: int,
quotechar: bytes = b'"',
is_quoting: bool = True,
encoding: str = None,
newline: bytes = None,
):
"""
Move the file offset at the specified amount of bytes.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
offset_size : int
Number of bytes to read and ignore.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
bool
If file pointer reached the end of the file, but did not find
closing quote returns `False`. `True` in any other case.
"""
if is_quoting:
chunk = f.read(offset_size)
outside_quotes = not chunk.count(quotechar) % 2
else:
f.seek(offset_size, os.SEEK_CUR)
outside_quotes = True
# after we read `offset_size` bytes, we most likely break the line but
# the modin implementation doesn't work correctly in the case, so we must
# make sure that the line is read completely to the lineterminator,
# which is what the `_read_rows` does
outside_quotes, _ = cls._read_rows(
f,
nrows=1,
quotechar=quotechar,
is_quoting=is_quoting,
outside_quotes=outside_quotes,
encoding=encoding,
newline=newline,
)
return outside_quotes
    @classmethod
    def partitioned_file(
        cls,
        f,
        num_partitions: int = None,
        nrows: int = None,
        skiprows: int = None,
        quotechar: bytes = b'"',
        is_quoting: bool = True,
        encoding: str = None,
        newline: bytes = None,
        header_size: int = 0,
        pre_reading: int = 0,
        get_metadata_kw: dict = None,
    ):
        """
        Compute chunk sizes in bytes for every partition.

        Parameters
        ----------
        f : file-like object
            File handle of file to be partitioned.
        num_partitions : int, optional
            For what number of partitions split a file.
            If not specified grabs the value from `modin.config.NPartitions.get()`.
        nrows : int, optional
            Number of rows of file to read.
        skiprows : int, optional
            Specifies rows to skip.
        quotechar : bytes, default: b'"'
            Indicate quote in a file.
        is_quoting : bool, default: True
            Whether or not to consider quotes.
        encoding : str, optional
            Encoding of `f`.
        newline : bytes, optional
            Byte or sequence of bytes indicating line endings.
        header_size : int, default: 0
            Number of rows, that occupied by header.
        pre_reading : int, default: 0
            Number of rows between header and skipped rows, that should be read.
        get_metadata_kw : dict, optional
            Keyword arguments for `cls.read_callback` to compute metadata if needed.
            This option is not compatible with `pre_reading!=0`.

        Returns
        -------
        list
            List with the next elements:
                int : partition start read byte
                int : partition end read byte
        pandas.DataFrame or None
            Dataframe from which metadata can be retrieved. Can be None if `get_metadata_kw=None`.
        """
        if get_metadata_kw is not None and pre_reading != 0:
            raise ValueError(
                f"Incompatible combination of parameters: {get_metadata_kw=}, {pre_reading=}"
            )
        read_rows_counter = 0
        outside_quotes = True
        if num_partitions is None:
            # Reserve one partition for the `pre_reading` chunk, which is read
            # separately below.
            num_partitions = NPartitions.get() - 1 if pre_reading else NPartitions.get()
        rows_skipper = cls.rows_skipper_builder(
            f, quotechar, is_quoting=is_quoting, encoding=encoding, newline=newline
        )
        result = []
        file_size = cls.file_size(f)
        pd_df_metadata = None
        if pre_reading:
            # Skip the header, then assign the rows between the header and the
            # first skipped row to the first partition.
            rows_skipper(header_size)
            pre_reading_start = f.tell()
            outside_quotes, read_rows = cls._read_rows(
                f,
                nrows=pre_reading,
                quotechar=quotechar,
                is_quoting=is_quoting,
                outside_quotes=outside_quotes,
                encoding=encoding,
                newline=newline,
            )
            read_rows_counter += read_rows
            result.append((pre_reading_start, f.tell()))
            # ending up inside quotes here means the file has an unbalanced quote
            if is_quoting and not outside_quotes:
                warnings.warn("File has mismatched quotes")
            rows_skipper(skiprows)
        else:
            rows_skipper(skiprows)
            if get_metadata_kw:
                start = f.tell()
                # For correct behavior, if we want to avoid double skipping rows,
                # we need to get metadata after skipping.
                pd_df_metadata = cls.read_callback(f, **get_metadata_kw)
                f.seek(start)
            rows_skipper(header_size)
        start = f.tell()
        if nrows:
            # Partition by a fixed number of rows per chunk.
            # NOTE(review): `max(1, num_partitions, ...)` clamps the chunk size to
            # at least `num_partitions` rows, not just to 1 — confirm this is the
            # intended formula rather than `max(1, nrows // num_partitions)`.
            partition_size = max(1, num_partitions, nrows // num_partitions)
            while f.tell() < file_size and read_rows_counter < nrows:
                if read_rows_counter + partition_size > nrows:
                    # it's possible only if is_quoting==True
                    partition_size = nrows - read_rows_counter
                outside_quotes, read_rows = cls._read_rows(
                    f,
                    nrows=partition_size,
                    quotechar=quotechar,
                    is_quoting=is_quoting,
                    encoding=encoding,
                    newline=newline,
                )
                result.append((start, f.tell()))
                start = f.tell()
                read_rows_counter += read_rows
                # ending up inside quotes here means the file has an unbalanced quote
                if is_quoting and not outside_quotes:
                    warnings.warn("File has mismatched quotes")
        else:
            # Partition by an (approximately) equal number of bytes per chunk;
            # `cls.offset` extends each chunk to the end of its last line.
            partition_size = max(1, num_partitions, file_size // num_partitions)
            while f.tell() < file_size:
                outside_quotes = cls.offset(
                    f,
                    offset_size=partition_size,
                    quotechar=quotechar,
                    is_quoting=is_quoting,
                    encoding=encoding,
                    newline=newline,
                )
                result.append((start, f.tell()))
                start = f.tell()
                # ending up inside quotes here means the file has an unbalanced quote
                if is_quoting and not outside_quotes:
                    warnings.warn("File has mismatched quotes")
        return result, pd_df_metadata
    @classmethod
    def _read_rows(
        cls,
        f,
        nrows: int,
        quotechar: bytes = b'"',
        is_quoting: bool = True,
        outside_quotes: bool = True,
        encoding: str = None,
        newline: bytes = None,
    ):
        """
        Move the file offset at the specified amount of rows.

        Parameters
        ----------
        f : file-like object
            File handle that should be used for offset movement.
        nrows : int
            Number of rows to read.
        quotechar : bytes, default: b'"'
            Indicate quote in a file.
        is_quoting : bool, default: True
            Whether or not to consider quotes.
        outside_quotes : bool, default: True
            Whether the file pointer is within quotes or not at the time this function is called.
        encoding : str, optional
            Encoding of `f`.
        newline : bytes, optional
            Byte or sequence of bytes indicating line endings.

        Returns
        -------
        bool
            If file pointer reached the end of the file, but did not find closing quote
            returns `False`. `True` in any other case.
        int
            Number of rows that were read.
        """
        if nrows is not None and nrows <= 0:
            return True, 0
        rows_read = 0
        # For non-UTF-8 "utf" encodings, unicode_escape and utf-8-sig the file
        # object's own line iteration can't be trusted; use the custom iterator
        # that splits on the explicitly computed `newline` byte sequence.
        if encoding and (
            "utf" in encoding
            and "8" not in encoding
            or encoding == "unicode_escape"
            or encoding.replace("-", "_") == "utf_8_sig"
        ):
            iterator = CustomNewlineIterator(f, newline)
        else:
            iterator = f
        for line in iterator:
            # An odd number of quote characters on a line toggles us into/out
            # of a quoted field that spans multiple physical lines.
            if is_quoting and line.count(quotechar) % 2:
                outside_quotes = not outside_quotes
            # Only a line ending outside of quotes terminates a logical row.
            if outside_quotes:
                rows_read += 1
                if rows_read >= nrows:
                    break
        if isinstance(iterator, CustomNewlineIterator):
            # NOTE(review): presumably restores the underlying file position
            # after buffered iteration; confirm against CustomNewlineIterator.seek.
            iterator.seek()
        # case when EOF was reached while still inside quotes: the final
        # unterminated row was consumed anyway, so count it
        if not outside_quotes:
            rows_read += 1
        return outside_quotes, rows_read
@classmethod
def compute_newline(cls, file_like, encoding, quotechar):
"""
Compute byte or sequence of bytes indicating line endings.
Parameters
----------
file_like : file-like object
File handle that should be used for line endings computing.
encoding : str
Encoding of `file_like`.
quotechar : str
Quotechar used for parsing `file-like`.
Returns
-------
bytes
line endings
"""
newline = None
if encoding is None:
return newline, quotechar.encode("UTF-8")
quotechar = quotechar.encode(encoding)
encoding = encoding.replace("-", "_")
if (
"utf" in encoding
and "8" not in encoding
or encoding == "unicode_escape"
or encoding == "utf_8_sig"
):
# trigger for computing f.newlines
file_like.readline()
# in bytes
newline = file_like.newlines.encode(encoding)
boms = ()
if encoding == "utf_8_sig":
boms = (codecs.BOM_UTF8,)
elif "16" in encoding:
boms = (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)
elif "32" in encoding:
boms = (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)
for bom in boms:
if newline.startswith(bom):
bom_len = len(bom)
newline = newline[bom_len:]
quotechar = quotechar[bom_len:]
break
return newline, quotechar
# _read helper functions
@classmethod
def rows_skipper_builder(
cls, f, quotechar, is_quoting, encoding=None, newline=None
):
"""
Build object for skipping passed number of lines.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
quotechar : bytes
Indicate quote in a file.
is_quoting : bool
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
object
skipper object.
"""
def skipper(n):
if n == 0 or n is None:
return 0
else:
return cls._read_rows(
f,
quotechar=quotechar,
is_quoting=is_quoting,
nrows=n,
encoding=encoding,
newline=newline,
)[1]
return skipper
@classmethod
def _define_header_size(
cls,
header: Union[int, Sequence[int], str, None] = "infer",
names: Optional[Sequence] = lib.no_default,
) -> int:
"""
Define the number of rows that are used by header.
Parameters
----------
header : int, list of int or str, default: "infer"
Original `header` parameter of `read_csv` function.
names : array-like, optional
Original names parameter of `read_csv` function.
Returns
-------
header_size : int
The number of rows that are used by header.
"""
header_size = 0
if header == "infer" and names in [lib.no_default, None]:
header_size += 1
elif isinstance(header, int):
header_size += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
header_size += max(header) + 1
return header_size
@classmethod
def _define_metadata(
cls,
df: pandas.DataFrame,
column_names: ColumnNamesTypes,
) -> Tuple[list, int]:
"""
Define partitioning metadata.
Parameters
----------
df : pandas.DataFrame
The DataFrame to split.
column_names : ColumnNamesTypes
Column names of df.
Returns
-------
column_widths : list
Column width to use during new frame creation (number of
columns for each partition).
num_splits : int
The maximum number of splits to separate the DataFrame into.
"""
# This is the number of splits for the columns
num_splits = min(len(column_names) or 1, NPartitions.get())
min_block_size = MinColumnPartitionSize.get()
column_chunksize = compute_chunksize(df.shape[1], num_splits, min_block_size)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
# split columns into chunks with maximal size column_chunksize, for example
# if num_splits == 4, len(column_names) == 80 and column_chunksize == 32,
# column_widths will be [32, 32, 16, 0]
column_widths = [
(
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else (
0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
)
)
for i in range(num_splits)
]
return column_widths, num_splits
    # Cached serialized reference to ``cls.parse``; populated lazily by
    # ``preprocess_func`` so the parser is serialized via ``cls.put`` only once
    # per class instead of once per task.
    _parse_func = None
    @classmethod
    def preprocess_func(cls):  # noqa: RT01
        """Prepare a function for transmission to remote workers."""
        if cls._parse_func is None:
            cls._parse_func = cls.put(cls.parse)
        return cls._parse_func
@classmethod
def _launch_tasks(
cls, splits: list, *partition_args, **partition_kwargs
) -> Tuple[list, list, list]:
"""
Launch tasks to read partitions.
Parameters
----------
splits : list
List of tuples with partitions data, which defines
parser task (start/end read bytes and etc.).
*partition_args : tuple
Positional arguments to be passed to the parser function.
**partition_kwargs : dict
`kwargs` that should be passed to the parser function.
Returns
-------
partition_ids : list
array with references to the partitions data.
index_ids : list
array with references to the partitions index objects.
dtypes_ids : list
array with references to the partitions dtypes objects.
"""
partition_ids = [None] * len(splits)
index_ids = [None] * len(splits)
dtypes_ids = [None] * len(splits)
# this is done mostly for performance; see PR#5678 for details
func = cls.preprocess_func()
for idx, (start, end) in enumerate(splits):
partition_kwargs.update({"start": start, "end": end})
*partition_ids[idx], index_ids[idx], dtypes_ids[idx] = cls.deploy(
func=func,
f_args=partition_args,
f_kwargs=partition_kwargs,
num_returns=partition_kwargs.get("num_splits") + 2,
)
return partition_ids, index_ids, dtypes_ids
    @classmethod
    def check_parameters_support(
        cls,
        filepath_or_buffer,
        read_kwargs: dict,
        skiprows_md: Union[Sequence, callable, int],
        header_size: int,
    ) -> Tuple[bool, Optional[str]]:
        """
        Check support of only general parameters of `read_*` function.

        Parameters
        ----------
        filepath_or_buffer : str, path object or file-like object
            `filepath_or_buffer` parameter of `read_*` function.
        read_kwargs : dict
            Parameters of `read_*` function.
        skiprows_md : int, array or callable
            `skiprows` parameter modified for easier handling by Modin.
        header_size : int
            Number of rows that are used by header.

        Returns
        -------
        bool
            Whether passed parameters are supported or not.
        Optional[str]
            `None` if parameters are supported, otherwise an error
            message describing why parameters are not supported.
        """
        skiprows = read_kwargs.get("skiprows")
        if isinstance(filepath_or_buffer, str):
            if not cls.file_exists(
                filepath_or_buffer, read_kwargs.get("storage_options")
            ):
                return (False, cls._file_not_found_msg(filepath_or_buffer))
        elif not cls.pathlib_or_pypath(filepath_or_buffer):
            # Arbitrary file-like buffers cannot be partitioned for parallel reading.
            return (False, cls.BUFFER_UNSUPPORTED_MSG)
        if read_kwargs["chunksize"] is not None:
            return (False, "`chunksize` parameter is not supported")
        if read_kwargs.get("iterator"):
            return (False, "`iterator==True` parameter is not supported")
        if read_kwargs.get("dialect") is not None:
            return (False, "`dialect` parameter is not supported")
        if read_kwargs["lineterminator"] is not None:
            return (False, "`lineterminator` parameter is not supported")
        if read_kwargs["escapechar"] is not None:
            return (False, "`escapechar` parameter is not supported")
        if read_kwargs.get("skipfooter"):
            # pandas itself raises for these combinations, so fall back and let
            # that error surface unchanged.
            if read_kwargs.get("nrows") or read_kwargs.get("engine") == "c":
                return (False, "Exception is raised by pandas itself")
        # Rows skipped via `skiprows` must not overlap the header rows — that
        # case cannot be expressed with Modin's partition-time skipping.
        skiprows_supported = True
        if is_list_like(skiprows_md) and skiprows_md[0] < header_size:
            skiprows_supported = False
        elif callable(skiprows):
            # check if `skiprows` callable gives True for any of header indices
            is_intersection = any(
                cls._get_skip_mask(pandas.RangeIndex(header_size), skiprows)
            )
            if is_intersection:
                skiprows_supported = False
        if not skiprows_supported:
            return (
                False,
                "Values of `header` and `skiprows` parameters have intersections; "
                + "this case is unsupported by Modin",
            )
        return (True, None)
@classmethod
@_inherit_docstrings(pandas.io.parsers.base_parser.ParserBase._validate_usecols_arg)
def _validate_usecols_arg(cls, usecols):
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
+ "all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
@classmethod
def _manage_skiprows_parameter(
cls,
skiprows: Union[int, Sequence[int], Callable, None] = None,
header_size: int = 0,
) -> Tuple[Union[int, Sequence, Callable], bool, int]:
"""
Manage `skiprows` parameter of read_csv and read_fwf functions.
Change `skiprows` parameter in the way Modin could more optimally
process it. `csv_dispatcher` and `fwf_dispatcher` have two mechanisms of rows skipping:
1) During file partitioning (setting of file limits that should be read
by each partition) exact rows can be excluded from partitioning scope,
thus they won't be read at all and can be considered as skipped. This is
the most effective way of rows skipping (since it doesn't require any
actual data reading and postprocessing), but in this case `skiprows`
parameter can be an integer only. When it possible Modin always uses
this approach by setting of `skiprows_partitioning` return value.
2) Rows for skipping can be dropped after full dataset import. This is
more expensive way since it requires extra IO work and postprocessing
afterwards, but `skiprows` parameter can be of any non-integer type
supported by any pandas read function. These rows is
specified by setting of `skiprows_md` return value.
In some cases, if `skiprows` is uniformly distributed array (e.g. [1,2,3]),
`skiprows` can be "squashed" and represented as integer to make a fastpath.
If there is a gap between the first row for skipping and the last line of
the header (that will be skipped too), then assign to read this gap first
(assign the first partition to read these rows be setting of `pre_reading`
return value). See `Examples` section for details.
Parameters
----------
skiprows : int, array or callable, optional
Original `skiprows` parameter of any pandas read function.
header_size : int, default: 0
Number of rows that are used by header.
Returns
-------
skiprows_md : int, array or callable
Updated skiprows parameter. If `skiprows` is an array, this
array will be sorted. Also parameter will be aligned to
actual data in the `query_compiler` (which, for example,
doesn't contain header rows)
pre_reading : int
The number of rows that should be read before data file
splitting for further reading (the number of rows for
the first partition).
skiprows_partitioning : int
The number of rows that should be skipped virtually (skipped during
data file partitioning).
Examples
--------
Let's consider case when `header`="infer" and `skiprows`=[3,4,5]. In
this specific case fastpath can be done since `skiprows` is uniformly
distributed array, so we can "squash" it to integer and set
`skiprows_partitioning`=3. But if no additional action will be done,
these three rows will be skipped right after header line, that corresponds
to `skiprows`=[1,2,3]. Now, to avoid this discrepancy, we need to assign
the first partition to read data between header line and the first
row for skipping by setting of `pre_reading` parameter, so setting
`pre_reading`=2. During data file partitiong, these lines will be assigned
for reading for the first partition, and then file position will be set at
the beginning of rows that should be skipped by `skiprows_partitioning`.
After skipping of these rows, the rest data will be divided between the
rest of partitions, see rows assignement below:
0 - header line (skip during partitioning)
1 - pre_reading (assign to read by the first partition)
2 - pre_reading (assign to read by the first partition)
3 - skiprows_partitioning (skip during partitioning)
4 - skiprows_partitioning (skip during partitioning)
5 - skiprows_partitioning (skip during partitioning)
6 - data to partition (divide between the rest of partitions)
7 - data to partition (divide between the rest of partitions)
"""
pre_reading = skiprows_partitioning = skiprows_md = 0
if isinstance(skiprows, int):
skiprows_partitioning = skiprows
elif is_list_like(skiprows) and len(skiprows) > 0:
skiprows_md = np.sort(skiprows)
if np.all(np.diff(skiprows_md) == 1):
# `skiprows` is uniformly distributed array.
pre_reading = (
skiprows_md[0] - header_size if skiprows_md[0] > header_size else 0
)
skiprows_partitioning = len(skiprows_md)
skiprows_md = 0
elif skiprows_md[0] > header_size:
skiprows_md = skiprows_md - header_size
elif callable(skiprows):
def skiprows_func(x):
return skiprows(x + header_size)
skiprows_md = skiprows_func
return skiprows_md, pre_reading, skiprows_partitioning
@classmethod
def _define_index(
cls,
index_ids: list,
index_name: str,
) -> Tuple[IndexColType, list]:
"""
Compute the resulting DataFrame index and index lengths for each of partitions.
Parameters
----------
index_ids : list
Array with references to the partitions index objects.
index_name : str
Name that should be assigned to the index if `index_col`
is not provided.
Returns
-------
new_index : IndexColType
Index that should be passed to the new_frame constructor.
row_lengths : list
Partitions rows lengths.
"""
index_objs = cls.materialize(index_ids)
if len(index_objs) == 0 or isinstance(index_objs[0], int):
row_lengths = index_objs
new_index = pandas.RangeIndex(sum(index_objs))
else:
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = index_name
return new_index, row_lengths
    @classmethod
    def _get_new_qc(
        cls,
        partition_ids: list,
        index_ids: list,
        dtypes_ids: list,
        index_col: IndexColType,
        index_name: str,
        column_widths: list,
        column_names: ColumnNamesTypes,
        skiprows_md: Union[Sequence, callable, None] = None,
        header_size: int = None,
        **kwargs,
    ):
        """
        Get new query compiler from data received from workers.

        Parameters
        ----------
        partition_ids : list
            Array with references to the partitions data.
        index_ids : list
            Array with references to the partitions index objects.
        dtypes_ids : list
            Array with references to the partitions dtypes objects.
        index_col : IndexColType
            `index_col` parameter of `read_csv` function.
        index_name : str
            Name that should be assigned to the index if `index_col`
            is not provided.
        column_widths : list
            Number of columns in each partition.
        column_names : ColumnNamesTypes
            Array with columns names.
        skiprows_md : array-like or callable, optional
            Specifies rows to skip.
        header_size : int, default: 0
            Number of rows, that occupied by header.
        **kwargs : dict
            Parameters of `read_csv` function needed for postprocessing.

        Returns
        -------
        new_query_compiler : BaseQueryCompiler
            New query compiler, created from `new_frame`.
        """
        # Row lengths are not known yet, hence `[None] * len(index_ids)`.
        partition_ids = cls.build_partition(
            partition_ids, [None] * len(index_ids), column_widths
        )
        # Index and dtypes are wrapped into lambdas so their materialization
        # is deferred until actually needed.
        new_frame = cls.frame_cls(
            partition_ids,
            lambda: cls._define_index(index_ids, index_name),
            column_names,
            None,
            column_widths,
            dtypes=lambda: cls.get_dtypes(dtypes_ids, column_names),
        )
        new_query_compiler = cls.query_compiler_cls(new_frame)
        skipfooter = kwargs.get("skipfooter", None)
        if skipfooter:
            # Footer rows were read by the workers; drop them here.
            new_query_compiler = new_query_compiler.drop(
                new_query_compiler.index[-skipfooter:]
            )
        if skiprows_md is not None:
            # skip rows that passed as array or callable
            nrows = kwargs.get("nrows", None)
            index_range = pandas.RangeIndex(len(new_query_compiler.index))
            if is_list_like(skiprows_md):
                new_query_compiler = new_query_compiler.take_2d_positional(
                    index=index_range.delete(skiprows_md)
                )
            elif callable(skiprows_md):
                skip_mask = cls._get_skip_mask(index_range, skiprows_md)
                if not isinstance(skip_mask, np.ndarray):
                    skip_mask = skip_mask.to_numpy("bool")
                # Keep the positions where the skip mask is False.
                view_idx = index_range[~skip_mask]
                new_query_compiler = new_query_compiler.take_2d_positional(
                    index=view_idx
                )
            else:
                raise TypeError(
                    f"Not acceptable type of `skiprows` parameter: {type(skiprows_md)}"
                )
            if not isinstance(new_query_compiler.index, pandas.MultiIndex):
                new_query_compiler = new_query_compiler.reset_index(drop=True)
            if nrows:
                # `nrows` could not be applied during partitioning when rows
                # are skipped positionally, so cut the frame here instead.
                new_query_compiler = new_query_compiler.take_2d_positional(
                    pandas.RangeIndex(len(new_query_compiler.index))[:nrows]
                )
        if index_col is None or index_col is False:
            # No explicit index column: align row labels across partitions.
            new_query_compiler._modin_frame.synchronize_labels(axis=0)
        return new_query_compiler
    @classmethod
    def _read(cls, filepath_or_buffer, **kwargs):
        """
        Read data from `filepath_or_buffer` according to `kwargs` parameters.

        Used in `read_csv` and `read_fwf` Modin implementations.

        Parameters
        ----------
        filepath_or_buffer : str, path object or file-like object
            `filepath_or_buffer` parameter of read functions.
        **kwargs : dict
            Parameters of read functions.

        Returns
        -------
        new_query_compiler : BaseQueryCompiler
            Query compiler with imported data for further processing.
        """
        filepath_or_buffer = stringify_path(filepath_or_buffer)
        filepath_or_buffer_md = (
            cls.get_path(filepath_or_buffer)
            if isinstance(filepath_or_buffer, str)
            else cls.get_path_or_buffer(filepath_or_buffer)
        )
        compression_infered = cls.infer_compression(
            filepath_or_buffer, kwargs["compression"]
        )
        # Getting frequently used kwargs;
        # They should be defined in higher level
        names = kwargs["names"]
        index_col = kwargs["index_col"]
        encoding = kwargs["encoding"]
        skiprows = kwargs["skiprows"]
        header = kwargs["header"]
        # Define header size for further skipping (Header can be skipped because header
        # information will be obtained further from empty_df, so no need to handle it
        # by workers)
        header_size = cls._define_header_size(
            header,
            names,
        )
        (
            skiprows_md,
            pre_reading,
            skiprows_partitioning,
        ) = cls._manage_skiprows_parameter(skiprows, header_size)
        # Non-integer `skiprows` cannot be applied during partitioning and is
        # handled on the assembled query compiler afterwards.
        should_handle_skiprows = skiprows_md is not None and not isinstance(
            skiprows_md, int
        )
        (use_modin_impl, fallback_reason) = cls.check_parameters_support(
            filepath_or_buffer_md,
            kwargs,
            skiprows_md,
            header_size,
        )
        if not use_modin_impl:
            # Unsupported parameter combination: defer to pandas in one worker.
            return cls.single_worker_read(
                filepath_or_buffer,
                kwargs,
                reason=fallback_reason,
            )
        is_quoting = kwargs["quoting"] != QUOTE_NONE
        usecols = kwargs["usecols"]
        use_inferred_column_names = cls._uses_inferred_column_names(
            names, skiprows, kwargs["skipfooter"], usecols
        )
        # Computing metadata simultaneously with skipping rows allows us to not
        # do extra work and improve performance for certain cases, as otherwise,
        # it would require double re-reading of skipped rows in order to retrieve metadata.
        can_compute_metadata_while_skipping_rows = (
            # basic supported case: isinstance(skiprows, int) without any additional params
            isinstance(skiprows, int)
            and (usecols is None or skiprows is None)
            and pre_reading == 0
        )
        # A single-row read is enough to infer column names and dtypes.
        get_metadata_kw = dict(kwargs, nrows=1, skipfooter=0, index_col=index_col)
        if get_metadata_kw.get("engine", None) == "pyarrow":
            # pyarrow engine doesn't support `nrows` option;
            # https://github.com/pandas-dev/pandas/issues/38872 can be used to track pyarrow engine features
            get_metadata_kw["engine"] = "c"
        if not can_compute_metadata_while_skipping_rows:
            pd_df_metadata = cls.read_callback(
                filepath_or_buffer_md,
                **get_metadata_kw,
            )
            column_names = pd_df_metadata.columns
            column_widths, num_splits = cls._define_metadata(
                pd_df_metadata, column_names
            )
            get_metadata_kw = None
        else:
            get_metadata_kw = dict(get_metadata_kw, skiprows=None)
            # `memory_map` doesn't work with file-like object so we can't use it here.
            # We can definitely skip it without violating the reading logic
            # since this parameter is intended to optimize reading.
            # For reading a couple of lines, this is not essential.
            get_metadata_kw.pop("memory_map", None)
            # These parameters are already used when opening file `f`,
            # they do not need to be used again.
            get_metadata_kw.pop("storage_options", None)
            get_metadata_kw.pop("compression", None)
        with OpenFile(
            filepath_or_buffer_md,
            "rb",
            compression_infered,
            **(kwargs.get("storage_options", None) or {}),
        ) as f:
            # Compute the line terminator and (possibly BOM-stripped) quote
            # character without disturbing the binary read position.
            old_pos = f.tell()
            fio = io.TextIOWrapper(f, encoding=encoding, newline="")
            newline, quotechar = cls.compute_newline(
                fio, encoding, kwargs.get("quotechar", '"')
            )
            f.seek(old_pos)
            splits, pd_df_metadata_temp = cls.partitioned_file(
                f,
                num_partitions=NPartitions.get(),
                nrows=kwargs["nrows"] if not should_handle_skiprows else None,
                skiprows=skiprows_partitioning,
                quotechar=quotechar,
                is_quoting=is_quoting,
                encoding=encoding,
                newline=newline,
                header_size=header_size,
                pre_reading=pre_reading,
                get_metadata_kw=get_metadata_kw,
            )
            if can_compute_metadata_while_skipping_rows:
                pd_df_metadata = pd_df_metadata_temp
        # compute dtypes if possible
        common_dtypes = None
        if kwargs["dtype"] is None:
            most_common_dtype = (object,)
            common_dtypes = {}
            for col, dtype in pd_df_metadata.dtypes.to_dict().items():
                if dtype in most_common_dtype:
                    common_dtypes[col] = dtype
        column_names = pd_df_metadata.columns
        column_widths, num_splits = cls._define_metadata(pd_df_metadata, column_names)
        # kwargs that will be passed to the workers
        partition_kwargs = dict(
            kwargs,
            header_size=0 if use_inferred_column_names else header_size,
            names=column_names if use_inferred_column_names else names,
            header="infer" if use_inferred_column_names else header,
            skipfooter=0,
            skiprows=None,
            nrows=None,
            compression=compression_infered,
            common_dtypes=common_dtypes,
        )
        # this is done mostly for performance; see PR#5678 for details
        filepath_or_buffer_md_ref = cls.put(filepath_or_buffer_md)
        kwargs_ref = cls.put(partition_kwargs)
        partition_ids, index_ids, dtypes_ids = cls._launch_tasks(
            splits,
            filepath_or_buffer_md_ref,
            kwargs_ref,
            num_splits=num_splits,
        )
        new_query_compiler = cls._get_new_qc(
            partition_ids=partition_ids,
            index_ids=index_ids,
            dtypes_ids=dtypes_ids,
            index_col=index_col,
            index_name=pd_df_metadata.index.name,
            column_widths=column_widths,
            column_names=column_names,
            skiprows_md=skiprows_md if should_handle_skiprows else None,
            header_size=header_size,
            skipfooter=kwargs["skipfooter"],
            parse_dates=kwargs["parse_dates"],
            nrows=kwargs["nrows"] if should_handle_skiprows else None,
        )
        return new_query_compiler
@classmethod
def _get_skip_mask(cls, rows_index: pandas.Index, skiprows: Callable):
"""
Get mask of skipped by callable `skiprows` rows.
Parameters
----------
rows_index : pandas.Index
Rows index to get mask for.
skiprows : Callable
Callable to check whether row index should be skipped.
Returns
-------
pandas.Index
"""
try:
# direct `skiprows` call is more efficient than using of
# map method, but in some cases it can work incorrectly, e.g.
# when `skiprows` contains `in` operator
mask = skiprows(rows_index)
assert is_list_like(mask)
except (ValueError, TypeError, AssertionError):
# ValueError can be raised if `skiprows` callable contains membership operator
# TypeError is raised if `skiprows` callable contains bitwise operator
# AssertionError is raised if unexpected behavior was detected
mask = rows_index.map(skiprows)
return mask
@staticmethod
def _uses_inferred_column_names(names, skiprows, skipfooter, usecols):
"""
Tell whether need to use inferred column names in workers or not.
1) ``False`` is returned in 2 cases and means next:
1.a) `names` parameter was provided from the API layer. In this case parameter
`names` must be provided as `names` parameter for ``read_csv`` in the workers.
1.b) `names` parameter wasn't provided from the API layer. In this case column names
inference must happen in each partition.
2) ``True`` is returned in case when inferred column names from pre-reading stage must be
provided as `names` parameter for ``read_csv`` in the workers.
In case `names` was provided, the other parameters aren't checked. Otherwise, inferred column
names should be used in a case of not full data reading which is defined by `skipfooter` parameter,
when need to skip lines at the bottom of file or by `skiprows` parameter, when need to skip lines at
the top of file (but if `usecols` was provided, column names inference must happen in the workers).
Parameters
----------
names : array-like
List of column names to use.
skiprows : list-like, int or callable
Line numbers to skip (0-indexed) or number of lines to skip (int) at
the start of the file. If callable, the callable function will be
evaluated against the row indices, returning ``True`` if the row should
be skipped and ``False`` otherwise.
skipfooter : int
Number of lines at bottom of the file to skip.
usecols : list-like or callable
Subset of the columns.
Returns
-------
bool
Whether to use inferred column names in ``read_csv`` of the workers or not.
"""
if names not in [None, lib.no_default]:
return False
if skipfooter != 0:
return True
if isinstance(skiprows, int) and skiprows == 0:
return False
if is_list_like(skiprows):
return usecols is None
return skiprows is not None
| TextFileDispatcher |
python | scrapy__scrapy | tests/test_command_check.py | {
"start": 428,
"end": 728
} | class ____(TestProjectBase):
spider_name = "check_spider"
def _write_contract(self, proj_path: Path, contracts: str, parse_def: str) -> None:
spider = proj_path / self.project_name / "spiders" / "checkspider.py"
spider.write_text(
f"""
import scrapy
| TestCheckCommand |
python | google__jax | jax/experimental/mosaic/gpu/core.py | {
"start": 9603,
"end": 9780
} | class ____(enum.Enum):
"""Semantics for the kernel's instruction stream."""
Lane = enum.auto()
Warpgroup = enum.auto()
@dataclasses.dataclass(frozen=True)
| LoweringSemantics |
python | kevin1024__vcrpy | vcr/unittest.py | {
"start": 68,
"end": 1017
} | class ____:
"""A TestCase mixin that provides VCR integration."""
vcr_enabled = True
def setUp(self):
super().setUp()
if self.vcr_enabled:
kwargs = self._get_vcr_kwargs()
myvcr = self._get_vcr(**kwargs)
cm = myvcr.use_cassette(self._get_cassette_name())
self.cassette = cm.__enter__()
self.addCleanup(cm.__exit__, None, None, None)
def _get_vcr(self, **kwargs):
if "cassette_library_dir" not in kwargs:
kwargs["cassette_library_dir"] = self._get_cassette_library_dir()
return VCR(**kwargs)
def _get_vcr_kwargs(self, **kwargs):
return kwargs
def _get_cassette_library_dir(self):
testdir = os.path.dirname(inspect.getfile(self.__class__))
return os.path.join(testdir, "cassettes")
def _get_cassette_name(self):
return f"{self.__class__.__name__}.{self._testMethodName}.yaml"
| VCRMixin |
python | langchain-ai__langchain | libs/partners/mistralai/tests/integration_tests/test_chat_models.py | {
"start": 1935,
"end": 6895
} | class ____(TypedDict):
name: str
authors: list[str]
def _check_parsed_result(result: Any, schema: Any) -> None:
if schema == Book:
assert isinstance(result, Book)
else:
assert all(key in ["name", "authors"] for key in result)
@pytest.mark.parametrize("schema", [Book, BookDict, Book.model_json_schema()])
def test_structured_output_json_schema(schema: Any) -> None:
llm = ChatMistralAI(model="ministral-8b-latest") # type: ignore[call-arg]
structured_llm = llm.with_structured_output(schema, method="json_schema")
messages = [
{"role": "system", "content": "Extract the book's information."},
{
"role": "user",
"content": "I recently read 'To Kill a Mockingbird' by Harper Lee.",
},
]
# Test invoke
result = structured_llm.invoke(messages)
_check_parsed_result(result, schema)
# Test stream
for chunk in structured_llm.stream(messages):
_check_parsed_result(chunk, schema)
@pytest.mark.parametrize("schema", [Book, BookDict, Book.model_json_schema()])
async def test_structured_output_json_schema_async(schema: Any) -> None:
llm = ChatMistralAI(model="ministral-8b-latest") # type: ignore[call-arg]
structured_llm = llm.with_structured_output(schema, method="json_schema")
messages = [
{"role": "system", "content": "Extract the book's information."},
{
"role": "user",
"content": "I recently read 'To Kill a Mockingbird' by Harper Lee.",
},
]
# Test invoke
result = await structured_llm.ainvoke(messages)
_check_parsed_result(result, schema)
# Test stream
async for chunk in structured_llm.astream(messages):
_check_parsed_result(chunk, schema)
def test_retry_parameters(caplog: pytest.LogCaptureFixture) -> None:
"""Test that retry parameters are honored in ChatMistralAI."""
# Create a model with intentionally short timeout and multiple retries
mistral = ChatMistralAI(
timeout=1, # Very short timeout to trigger timeouts
max_retries=3, # Should retry 3 times
)
# Simple test input that should take longer than 1 second to process
test_input = "Write a 2 sentence story about a cat"
# Measure start time
t0 = time.time()
logger = logging.getLogger(__name__)
try:
# Try to get a response
response = mistral.invoke(test_input)
# If successful, validate the response
elapsed_time = time.time() - t0
logger.info("Request succeeded in %.2f seconds", elapsed_time)
# Check that we got a valid response
assert response.content
assert isinstance(response.content, str)
assert "cat" in response.content.lower()
except ReadTimeout:
elapsed_time = time.time() - t0
logger.info("Request timed out after %.2f seconds", elapsed_time)
assert elapsed_time >= 3.0
pytest.skip("Test timed out as expected with short timeout")
except Exception:
logger.exception("Unexpected exception")
raise
def test_reasoning() -> None:
model = ChatMistralAI(model="magistral-medium-latest") # type: ignore[call-arg]
input_message = {
"role": "user",
"content": "Hello, my name is Bob.",
}
full: AIMessageChunk | None = None
for chunk in model.stream([input_message]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
thinking_blocks = 0
for i, block in enumerate(full.content):
if isinstance(block, dict) and block.get("type") == "thinking":
thinking_blocks += 1
reasoning_block = full.content_blocks[i]
assert reasoning_block["type"] == "reasoning"
assert isinstance(reasoning_block.get("reasoning"), str)
assert thinking_blocks > 0
next_message = {"role": "user", "content": "What is my name?"}
_ = model.invoke([input_message, full, next_message])
def test_reasoning_v1() -> None:
model = ChatMistralAI(model="magistral-medium-latest", output_version="v1") # type: ignore[call-arg]
input_message = {
"role": "user",
"content": "Hello, my name is Bob.",
}
full: AIMessageChunk | None = None
chunks = []
for chunk in model.stream([input_message]):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
chunks.append(chunk)
assert isinstance(full, AIMessageChunk)
reasoning_blocks = 0
for block in full.content:
if isinstance(block, dict) and block.get("type") == "reasoning":
reasoning_blocks += 1
assert isinstance(block.get("reasoning"), str)
assert reasoning_blocks > 0
next_message = {"role": "user", "content": "What is my name?"}
_ = model.invoke([input_message, full, next_message])
| BookDict |
python | django__django | django/contrib/gis/feeds.py | {
"start": 4419,
"end": 4906
} | class ____(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super().root_attributes()
attrs["xmlns:georss"] = "http://www.georss.org/georss"
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
| GeoAtom1Feed |
python | numba__numba | numba/tests/test_array_attr.py | {
"start": 6338,
"end": 7991
} | class ____(MemoryLeakMixin, TestCase):
_numba_parallel_test_ = False
def test_array_ctypes_data(self):
pyfunc = array_ctypes_data
cfunc = njit(pyfunc)
arr = np.arange(3)
self.assertEqual(pyfunc(arr), cfunc(arr))
@skip_parfors_unsupported
def test_array_ctypes_ref_error_in_parallel(self):
# Issue #2887
from ctypes import CFUNCTYPE, c_void_p, c_int32, c_double, c_bool
@CFUNCTYPE(c_bool, c_void_p, c_int32, c_void_p)
def callback(inptr, size, outptr):
# A ctypes callback that manipulate the incoming pointers.
try:
inbuf = (c_double * size).from_address(inptr)
outbuf = (c_double * 1).from_address(outptr)
a = np.ndarray(size, buffer=inbuf, dtype=np.float64)
b = np.ndarray(1, buffer=outbuf, dtype=np.float64)
b[0] = (a + a.size)[0]
return True
except:
import traceback
traceback.print_exception()
return False
# parallel=True is required to reproduce the error.
@njit(parallel=True)
def foo(size):
arr = np.ones(size)
out = np.empty(1)
# Exercise array.ctypes
inct = arr.ctypes
outct = out.ctypes
# The reference to `arr` is dead by now
status = callback(inct.data, size, outct.data)
return status, out[0]
size = 3
status, got = foo(size)
self.assertTrue(status)
self.assertPreciseEqual(got, (np.ones(size) + size)[0])
| TestArrayCTypes |
python | django__django | tests/multiple_database/routers.py | {
"start": 664,
"end": 1721
} | class ____:
"""
Control all database operations on models in the contrib.auth application.
"""
def db_for_read(self, model, **hints):
"Point all read operations on auth models to 'default'"
if model._meta.app_label == "auth":
# We use default here to ensure we can tell the difference
# between a read request and a write request for Auth objects
return "default"
return None
def db_for_write(self, model, **hints):
"Point all operations on auth models to 'other'"
if model._meta.app_label == "auth":
return "other"
return None
def allow_relation(self, obj1, obj2, **hints):
"Allow any relation if a model in Auth is involved"
return obj1._meta.app_label == "auth" or obj2._meta.app_label == "auth" or None
def allow_migrate(self, db, app_label, **hints):
"Make sure the auth app only appears on the 'other' db"
if app_label == "auth":
return db == "other"
return None
| AuthRouter |
python | huggingface__transformers | tests/models/vilt/test_modeling_vilt.py | {
"start": 21910,
"end": 22785
} | class ____(ViltModelTest, unittest.TestCase):
all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else ()
def setUp(self):
self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
@unittest.skip(reason="We only test the model that takes in multiple images")
def test_model(self):
pass
@unittest.skip(reason="We only test the model that takes in multiple images")
def test_for_token_classification(self):
pass
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| ViltForImagesAndTextClassificationModelTest |
python | giampaolo__psutil | tests/test_windows.py | {
"start": 3956,
"end": 11540
} | class ____(WindowsTestCase):
def test_nic_names(self):
out = sh('ipconfig /all')
nics = psutil.net_io_counters(pernic=True).keys()
for nic in nics:
if "pseudo-interface" in nic.replace(' ', '-').lower():
continue
if nic not in out:
return pytest.fail(
f"{nic!r} nic wasn't found in 'ipconfig /all' output"
)
def test_total_phymem(self):
w = wmi.WMI().Win32_ComputerSystem()[0]
assert int(w.TotalPhysicalMemory) == psutil.virtual_memory().total
def test_free_phymem(self):
w = wmi.WMI().Win32_PerfRawData_PerfOS_Memory()[0]
assert (
abs(int(w.AvailableBytes) - psutil.virtual_memory().free)
< TOLERANCE_SYS_MEM
)
def test_total_swapmem(self):
w = wmi.WMI().Win32_PerfRawData_PerfOS_Memory()[0]
assert (
int(w.CommitLimit) - psutil.virtual_memory().total
== psutil.swap_memory().total
)
if psutil.swap_memory().total == 0:
assert psutil.swap_memory().free == 0
assert psutil.swap_memory().used == 0
def test_percent_swapmem(self):
if psutil.swap_memory().total > 0:
w = wmi.WMI().Win32_PerfRawData_PerfOS_PagingFile(Name="_Total")[0]
# calculate swap usage to percent
percentSwap = int(w.PercentUsage) * 100 / int(w.PercentUsage_Base)
# exact percent may change but should be reasonable
# assert within +/- 5% and between 0 and 100%
assert psutil.swap_memory().percent >= 0
assert abs(psutil.swap_memory().percent - percentSwap) < 5
assert psutil.swap_memory().percent <= 100
# @pytest.mark.skipif(wmi is None, reason="wmi module is not installed")
# def test__UPTIME(self):
# # _UPTIME constant is not public but it is used internally
# # as value to return for pid 0 creation time.
# # WMI behaves the same.
# w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
# p = psutil.Process(0)
# wmic_create = str(w.CreationDate.split('.')[0])
# psutil_create = time.strftime("%Y%m%d%H%M%S",
# time.localtime(p.create_time()))
# Note: this test is not very reliable
@retry_on_failure()
def test_pids(self):
# Note: this test might fail if the OS is starting/killing
# other processes in the meantime
w = wmi.WMI().Win32_Process()
wmi_pids = {x.ProcessId for x in w}
psutil_pids = set(psutil.pids())
assert wmi_pids == psutil_pids
@retry_on_failure()
def test_disks(self):
ps_parts = psutil.disk_partitions(all=True)
wmi_parts = wmi.WMI().Win32_LogicalDisk()
for ps_part in ps_parts:
for wmi_part in wmi_parts:
if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
if not ps_part.mountpoint:
# this is usually a CD-ROM with no disk inserted
break
if 'cdrom' in ps_part.opts:
break
if ps_part.mountpoint.startswith('A:'):
break # floppy
try:
usage = psutil.disk_usage(ps_part.mountpoint)
except FileNotFoundError:
# usually this is the floppy
break
assert usage.total == int(wmi_part.Size)
wmi_free = int(wmi_part.FreeSpace)
assert usage.free == wmi_free
# 10 MB tolerance
if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
return pytest.fail(
f"psutil={usage.free}, wmi={wmi_free}"
)
break
else:
return pytest.fail(f"can't find partition {ps_part!r}")
@retry_on_failure()
def test_disk_usage(self):
for disk in psutil.disk_partitions():
if 'cdrom' in disk.opts:
continue
sys_value = win32api.GetDiskFreeSpaceEx(disk.mountpoint)
psutil_value = psutil.disk_usage(disk.mountpoint)
assert abs(sys_value[0] - psutil_value.free) < TOLERANCE_DISK_USAGE
assert (
abs(sys_value[1] - psutil_value.total) < TOLERANCE_DISK_USAGE
)
assert psutil_value.used == psutil_value.total - psutil_value.free
def test_disk_partitions(self):
sys_value = [
x + '\\'
for x in win32api.GetLogicalDriveStrings().split("\\\x00")
if x and not x.startswith('A:')
]
psutil_value = [
x.mountpoint
for x in psutil.disk_partitions(all=True)
if not x.mountpoint.startswith('A:')
]
assert sys_value == psutil_value
def test_convert_dos_path_drive(self):
winpath = 'C:\\Windows\\Temp'
driveletter = 'C:'
# Mocked NT device path for C:
devicepath = '\\Device\\HarddiskVolume1'
# Path returned by RtlDosPathNameToNtPathName
ntpath1 = '\\??\\C:\\Windows\\Temp'
# Mocked normalized NT path
ntpath2 = '\\Device\\HarddiskVolume1\\Windows\\Temp'
devices = {devicepath: driveletter}
with mock.patch(
'psutil._pswindows.cext.QueryDosDevice', side_effect=devices.get
) as m:
assert psutil._pswindows.convert_dos_path(ntpath1) == winpath
assert psutil._pswindows.convert_dos_path(ntpath2) == winpath
assert m.called
def test_convert_dos_path_unc(self):
# UNC path
winpath = '\\\\localhost\\C$\\Windows\\Temp'
# Path returned by RtlDosPathNameToNtPathName
ntpath1 = '\\??\\UNC\\localhost\\C$\\Windows\\Temp'
# Normalized NT path
ntpath2 = '\\Device\\Mup\\localhost\\C$\\Windows\\Temp'
assert psutil._pswindows.convert_dos_path(winpath) == winpath
assert psutil._pswindows.convert_dos_path(ntpath1) == winpath
assert psutil._pswindows.convert_dos_path(ntpath2) == winpath
def test_net_if_stats(self):
ps_names = set(cext.net_if_stats())
wmi_adapters = wmi.WMI().Win32_NetworkAdapter()
wmi_names = set()
for wmi_adapter in wmi_adapters:
wmi_names.add(wmi_adapter.Name)
wmi_names.add(wmi_adapter.NetConnectionID)
assert (
ps_names & wmi_names
), f"no common entries in {ps_names}, {wmi_names}"
def test_boot_time(self):
wmi_os = wmi.WMI().Win32_OperatingSystem()
wmi_btime_str = wmi_os[0].LastBootUpTime.split('.')[0]
wmi_btime_dt = datetime.datetime.strptime(
wmi_btime_str, "%Y%m%d%H%M%S"
)
psutil_dt = datetime.datetime.fromtimestamp(psutil.boot_time())
diff = abs((wmi_btime_dt - psutil_dt).total_seconds())
assert diff <= 5, (psutil_dt, wmi_btime_dt)
def test_uptime(self):
# ...against GetTickCount64() (Windows < 7, does not include
# time spent during suspend / hybernate).
ms = ctypes.windll.kernel32.GetTickCount64()
secs = ms / 1000.0
assert abs(cext.uptime() - secs) < 0.5
# ===================================================================
# sensors_battery()
# ===================================================================
| TestSystemAPIs |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/serializers/test_action_serializer.py | {
"start": 335,
"end": 3084
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration = self.create_integration(
provider="slack",
name="example-integration",
external_id="123-id",
metadata={},
organization=self.organization,
)
def test_serialize_simple(self) -> None:
action = self.create_action(
type=Action.Type.PLUGIN,
data={},
)
result = serialize(action)
assert result == {
"id": str(action.id),
"type": "plugin",
"data": {},
"integrationId": None,
"config": {},
"status": "active",
}
def test_serialize_disabled(self) -> None:
action = self.create_action(
type=Action.Type.PLUGIN,
data={},
status=ObjectStatus.DISABLED,
)
result = serialize(action)
assert result == {
"id": str(action.id),
"type": "plugin",
"data": {},
"integrationId": None,
"config": {},
"status": "disabled",
}
def test_serialize_with_integration(self) -> None:
action = self.create_action(
type=Action.Type.OPSGENIE,
data={"priority": "P1"},
integration_id=self.integration.id,
config={
"target_identifier": "123",
"target_type": ActionTarget.SPECIFIC,
},
)
result = serialize(action)
assert result == {
"id": str(action.id),
"type": "opsgenie",
"data": {"priority": "P1"},
"integrationId": str(self.integration.id),
"config": {"targetType": "specific", "targetIdentifier": "123"},
"status": "active",
}
def test_serialize_with_integration_and_config(self) -> None:
action2 = self.create_action(
type=Action.Type.SLACK,
data={"tags": "bar"},
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_display": "freddy frog",
"target_identifier": "123-id",
},
)
result2 = serialize(action2)
assert result2 == {
"id": str(action2.id),
"type": "slack",
"data": {"tags": "bar"},
"integrationId": str(self.integration.id),
"config": {
"targetType": "specific",
"targetDisplay": "freddy frog",
"targetIdentifier": "123-id",
},
"status": "active",
}
| TestActionSerializer |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 14356,
"end": 16455
} | class ____(_PartitionerOneColumnOneParam):
"""A partitioner than can be used for sql engines that represents datetimes as strings.
The SQL engine that this currently supports is SQLite since it stores its datetimes as
strings.
The DatetimePartitioner will also work for SQLite and may be more intuitive.
"""
# date_format_strings syntax is documented here:
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
# It allows for arbitrary strings so can't be validated until conversion time.
date_format_string: str
column_name: str
sort_ascending: bool = True
method_name: Literal["partition_on_converted_datetime"] = "partition_on_converted_datetime"
@property
@override
def param_names(self) -> List[str]:
# The datetime parameter will be a string representing a datetime in the format
# given by self.date_format_string.
return ["datetime"]
@override
def partitioner_method_kwargs(self) -> Dict[str, Any]:
return {
"column_name": self.column_name,
"date_format_string": self.date_format_string,
}
@override
def batch_parameters_to_batch_spec_kwarg_identifiers(
self, options: BatchParameters
) -> Dict[str, Any]:
if "datetime" not in options:
raise ValueError( # noqa: TRY003 # FIXME CoP
"'datetime' must be specified in the batch parameters to create a batch identifier"
)
return {self.column_name: options["datetime"]}
# We create this type instead of using _Partitioner so pydantic can use to this to
# coerce the partitioner to the right type during deserialization from config.
SqlPartitioner = Union[
SqlPartitionerColumnValue,
SqlPartitionerMultiColumnValue,
SqlPartitionerDividedInteger,
SqlPartitionerModInteger,
SqlPartitionerYear,
SqlPartitionerYearAndMonth,
SqlPartitionerYearAndMonthAndDay,
SqlPartitionerDatetimePart,
SqlitePartitionerConvertedDateTime,
]
@public_api
| SqlitePartitionerConvertedDateTime |
python | sqlalchemy__sqlalchemy | test/orm/test_syntax_extensions.py | {
"start": 1940,
"end": 2212
} | class ____(SyntaxExtension, ClauseElement):
_traverse_internals = []
def apply_to_select(self, select_stmt):
select_stmt.apply_syntax_extension_point(
lambda existing: [*existing, self],
"post_criteria",
)
| PostCriteriaClause2 |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 27355,
"end": 28680
} | class ____(ChainedSource):
# Key to access in the dictionary. It can be one of the following types
# 1) ConstDictKeySource
# 2) constant - like string, integer
index: Any
def __post_init__(self) -> None:
from .variables import ConstantVariable
assert isinstance(
self.index, ConstDictKeySource
) or ConstantVariable.is_literal(self.index)
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def reconstruct(self, codegen: "PyCodegen") -> None:
# reconstruct dict.__getitem__(dct, key)
# Load dict.__getitem__
codegen.add_push_null(
lambda: codegen.load_import_from(utils.__name__, "dict_getitem")
)
# Load dict
codegen(self.base)
# Load key
if isinstance(self.index, Source):
codegen(self.index)
else:
codegen.append_output(codegen.create_load_const(self.index))
codegen.extend_output(create_call_function(2, False))
def name(self) -> str:
if isinstance(self.index, ConstDictKeySource):
return f"dict.__getitem__({self.base.name()}, {self.index.name()})"
else:
return f"{self.base.name()}[{self.index!r}]"
@dataclasses.dataclass(frozen=True)
| DictSubclassGetItemSource |
python | python-poetry__poetry | src/poetry/puzzle/solver.py | {
"start": 8048,
"end": 10160
} | class ____:
def __init__(self, id: DFSNodeID, name: str, base_name: str) -> None:
self.id = id
self.name = name
self.base_name = base_name
def reachable(self) -> Sequence[Self]:
return []
def visit(self, parents: list[PackageNode]) -> None:
pass
def __str__(self) -> str:
return str(self.id)
def depth_first_search(
source: PackageNode,
) -> tuple[list[list[PackageNode]], MarkerOriginDict]:
back_edges: dict[DFSNodeID, list[PackageNode]] = defaultdict(list)
markers: MarkerOriginDict = defaultdict(
lambda: defaultdict(lambda: defaultdict(EmptyMarker))
)
visited: set[DFSNodeID] = set()
topo_sorted_nodes: list[PackageNode] = []
dfs_visit(source, back_edges, visited, topo_sorted_nodes, markers)
# Combine the nodes by name
combined_nodes: dict[str, list[PackageNode]] = defaultdict(list)
for node in topo_sorted_nodes:
node.visit(back_edges[node.id])
combined_nodes[node.name].append(node)
combined_topo_sorted_nodes: list[list[PackageNode]] = [
combined_nodes.pop(node.name)
for node in topo_sorted_nodes
if node.name in combined_nodes
]
return combined_topo_sorted_nodes, markers
def dfs_visit(
node: PackageNode,
back_edges: dict[DFSNodeID, list[PackageNode]],
visited: set[DFSNodeID],
sorted_nodes: list[PackageNode],
markers: MarkerOriginDict,
) -> None:
if node.id in visited:
return
visited.add(node.id)
for out_neighbor in node.reachable():
back_edges[out_neighbor.id].append(node)
groups = out_neighbor.groups
prev_marker = markers[out_neighbor.package][node.package][groups]
new_marker = (
out_neighbor.marker
if node.package.is_root()
else out_neighbor.marker.without_extras()
)
markers[out_neighbor.package][node.package][groups] = prev_marker.union(
new_marker
)
dfs_visit(out_neighbor, back_edges, visited, sorted_nodes, markers)
sorted_nodes.insert(0, node)
| DFSNode |
python | astropy__astropy | astropy/io/ascii/ipac.py | {
"start": 754,
"end": 972
} | class ____(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html",
)
| IpacFormatError |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 153693,
"end": 154085
} | class ____(InputBuffer):
"""
Represents a donated buffer which is a saved tensor that is not alias to any
fwd inputs, fwd user outputs, and bwd outputs. We generally cannot inplace
reuse the input tensor memory during backward since it might be used in another
function. However, donated buffer can be inplace reused during backward
to save memory.
"""
| DonatedBuffer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-smartsheets/source_smartsheets/streams.py | {
"start": 2078,
"end": 2596
} | class ____(SmartsheetStreamBase):
"""
Stream for Smartsheet Reports.
Reports cannot be filtered by modifiedAt, so this stream does not support incremental sync.
"""
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
for record in self.smartsheet.read_records():
yield record
| SmartsheetReportStream |
python | gabrielfalcao__HTTPretty | tests/functional/testserver.py | {
"start": 3278,
"end": 3756
} | class ____(object):
def __init__(self, port):
self.port = int(port)
def start(self):
HTTPretty.disable()
args = [self.port]
self.process = Process(target=subprocess_server_tcp, args=args)
self.process.start()
time.sleep(1)
def stop(self):
try:
os.kill(self.process.pid, 9)
except OSError:
self.process.terminate()
finally:
self.is_running = False
| TCPServer |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/transport/stdio/producer.py | {
"start": 978,
"end": 3332
} | class ____(LanguageServerClient):
"""Implementation of a v3.0 compilant language server stdio client."""
MAX_TIMEOUT_TIME = 20000
def __init__(self, server_args='', log_file=None,
zmq_in_port=7000, zmq_out_port=7001):
super().__init__(zmq_in_port, zmq_out_port)
self.req_status = {}
self.process = None
logger.debug(repr(server_args))
logger.debug('Environment variables: {0}'.format(
list(os.environ.keys())))
if log_file:
logger.debug('Redirect stderr to {0}'.format(log_file))
log_file_handle = open(log_file, 'wb')
else:
log_file_handle = None
self.process = popen_spawn.PopenSpawn(
server_args, logfile=log_file_handle)
logger.info('Process pid: {0}'.format(self.process.pid))
logger.info('Connecting to language server on stdio')
super().finalize_initialization()
self.reading_thread = StdioIncomingMessageThread()
self.reading_thread.initialize(self.process, self.zmq_out_socket,
self.req_status, expectable=True)
def start(self):
self.reading_thread.start()
logger.info('Ready to receive/attend requests and responses!')
def stop(self):
logger.info('Closing consumer thread...')
self.reading_thread.stop()
logger.debug('Joining thread...')
logger.debug('Exit routine should be complete')
def transport_send(self, content_length, body):
if os.name == 'nt':
content_length = content_length.decode('utf-8')
body = body.decode('utf-8')
self.process.write(content_length)
self.process.write(body)
def is_server_alive(self):
"""This method verifies if stdout is broken."""
connected = False
connection_error = None
initial_time = time.time()
try:
while not connected:
connected = not self.process.proc.poll()
if time.time() - initial_time > self.MAX_TIMEOUT_TIME:
connection_error = 'Timeout communication period exceeded'
break
except Exception as e:
connection_error = e
return connected, connection_error, self.process.pid
| StdioLanguageServerClient |
python | django__django | django/db/models/expressions.py | {
"start": 44993,
"end": 46362
} | class ____(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, ()
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
| Col |
python | django__django | tests/admin_views/test_multidb.py | {
"start": 6335,
"end": 7055
} | class ____:
def db_for_read(self, model, instance=None, **hints):
if model._meta.app_label in {"auth", "sessions", "contenttypes"}:
return "default"
return "other"
def db_for_write(self, model, **hints):
if model._meta.app_label in {"auth", "sessions", "contenttypes"}:
return "default"
return "other"
def allow_relation(self, obj1, obj2, **hints):
return obj1._state.db in {"default", "other"} and obj2._state.db in {
"default",
"other",
}
def allow_migrate(self, db, app_label, **hints):
return True
@override_settings(ROOT_URLCONF=__name__, DATABASE_ROUTERS=[ViewOnSiteRouter()])
| ViewOnSiteRouter |
python | scipy__scipy | scipy/stats/tests/test_generation/reference_distributions.py | {
"start": 12815,
"end": 13154
} | class ____(ReferenceDistribution):
def __init__(self, *, c, d):
super().__init__(c=c, d=d)
def _support(self, c, d):
return 0, mp.inf
def _pdf(self, x, c, d):
return c * d * x ** (-c - 1) * (1 + x ** (-c)) ** (-d - 1)
def _ppf(self, p, guess, c, d):
return (p**(-1.0/d) - 1)**(-1.0/c)
| Burr |
python | pytest-dev__pytest | src/_pytest/_code/code.py | {
"start": 45329,
"end": 45900
} | class ____(TerminalRepr):
# Provided by subclasses.
reprtraceback: ReprTraceback
reprcrash: ReprFileLocation | None
sections: list[tuple[str, str, str]] = dataclasses.field(
init=False, default_factory=list
)
def addsection(self, name: str, content: str, sep: str = "-") -> None:
self.sections.append((name, content, sep))
def toterminal(self, tw: TerminalWriter) -> None:
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
@dataclasses.dataclass(eq=False)
| ExceptionRepr |
python | crytic__slither | slither/utils/ck.py | {
"start": 9285,
"end": 9494
} | class ____:
"""Class to hold the information for a section of the report."""
title: str
pretty_table: MyPrettyTable
txt: str
@dataclass
# pylint: disable=too-many-instance-attributes
| SectionInfo |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 22397,
"end": 22895
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MegatronBertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
@auto_docstring
| MegatronBertPreTrainingHeads |
python | scikit-learn__scikit-learn | sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py | {
"start": 10792,
"end": 16559
} | class ____(BaseDistancesReductionDispatcher):
"""Compute radius-based neighbors for two sets of vectors.
For each row-vector X[i] of the queries X, find all the indices j of
row-vectors in Y such that:
dist(X[i], Y[j]) <= radius
The distance function `dist` depends on the values of the `metric`
and `metric_kwargs` parameters.
This class is not meant to be instantiated, one should only use
its :meth:`compute` classmethod which handles allocation and
deallocation consistently.
"""
@classmethod
def compute(
cls,
X,
Y,
radius,
metric="euclidean",
chunk_size=None,
metric_kwargs=None,
strategy=None,
return_distance=False,
sort_results=False,
):
"""Return the results of the reduction for the given arguments.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples_X, n_features)
Input data.
Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
Input data.
radius : float
The radius defining the neighborhood.
metric : str, default='euclidean'
The distance metric to use.
For a list of available metrics, see the documentation of
:class:`~sklearn.metrics.DistanceMetric`.
chunk_size : int, default=None,
The number of vectors per chunk. If None (default) looks-up in
scikit-learn configuration for `pairwise_dist_chunk_size`,
and use 256 if it is not set.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
The chunking strategy defining which dataset parallelization are made on.
For both strategies the computations happens with two nested loops,
respectively on chunks of X and chunks of Y.
Strategies differs on which loop (outer or inner) is made to run
in parallel with the Cython `prange` construct:
- 'parallel_on_X' dispatches chunks of X uniformly on threads.
Each thread then iterates on all the chunks of Y. This strategy is
embarrassingly parallel and comes with no datastructures
synchronisation.
- 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
Each thread processes all the chunks of X in turn. This strategy is
a sequence of embarrassingly parallel subtasks (the inner loop on Y
chunks) with intermediate datastructures synchronisation at each
iteration of the sequential outer loop on X chunks.
- 'auto' relies on a simple heuristic to choose between
'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
'parallel_on_X' is usually the most efficient strategy.
When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
brings more opportunity for parallelism and is therefore more efficient
despite the synchronization step at each iteration of the outer loop
on chunks of `X`.
- None (default) looks-up in scikit-learn configuration for
`pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
return_distance : boolean, default=False
Return distances between each X vector and its neighbors if set to True.
sort_results : boolean, default=False
Sort results with respect to distances between each X vector and its
neighbors if set to True.
Returns
-------
If return_distance=False:
- neighbors_indices : ndarray of n_samples_X ndarray
Indices of the neighbors for each vector in X.
If return_distance=True:
- neighbors_indices : ndarray of n_samples_X ndarray
Indices of the neighbors for each vector in X.
- neighbors_distances : ndarray of n_samples_X ndarray
Distances to the neighbors for each vector in X.
Notes
-----
This classmethod inspects the arguments values to dispatch to the
dtype-specialized implementation of :class:`RadiusNeighbors`.
This allows decoupling the API entirely from the implementation details
whilst maintaining RAII: all temporarily allocated datastructures necessary
for the concrete implementation are therefore freed when this classmethod
returns.
"""
if X.dtype == Y.dtype == np.float64:
return RadiusNeighbors64.compute(
X=X,
Y=Y,
radius=radius,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
sort_results=sort_results,
return_distance=return_distance,
)
if X.dtype == Y.dtype == np.float32:
return RadiusNeighbors32.compute(
X=X,
Y=Y,
radius=radius,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
sort_results=sort_results,
return_distance=return_distance,
)
raise ValueError(
"Only float64 or float32 datasets pairs are supported at this time, "
f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
)
| RadiusNeighbors |
python | tensorflow__tensorflow | tensorflow/python/eager/context.py | {
"start": 14849,
"end": 15845
} | class ____(
collections.namedtuple("PhysicalDevice", ["name", "device_type"])
):
"""Abstraction for a locally visible physical device.
TensorFlow can utilize various devices such as the CPU or multiple GPUs
for computation. Before initializing a local device for use, the user can
customize certain properties of the device such as it's visibility or memory
configuration.
Once a visible `tf.config.PhysicalDevice` is initialized one or more
`tf.config.LogicalDevice` objects are created. Use
`tf.config.set_visible_devices` to configure the visibility of a physical
device and `tf.config.set_logical_device_configuration` to configure multiple
`tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
useful when separation between models is needed or to simulate a multi-device
environment.
Fields:
name: Unique identifier for device.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
| PhysicalDevice |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_most_common_value.py | {
"start": 774,
"end": 3108
} | class ____(ColumnAggregateMetricProvider):
metric_name = "column.most_common_value"
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
mode_list = list(column.mode().values)
return mode_list
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
column_value_counts = metrics["column.value_counts"]
return list(column_value_counts[column_value_counts == column_value_counts.max()].index)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
column_value_counts = metrics["column.value_counts"]
return list(column_value_counts[column_value_counts == column_value_counts.max()].index)
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(execution_engine, (SparkDFExecutionEngine, SqlAlchemyExecutionEngine)):
dependencies["column.value_counts"] = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs={
"sort": "value",
"collate": None,
},
)
return dependencies
| ColumnMostCommonValue |
python | viewflow__viewflow | viewflow/workflow/migrations/0009_merge.py | {
"start": 84,
"end": 261
} | class ____(migrations.Migration):
dependencies = [
("viewflow", "0008_jsonfield_and_artifact"),
("viewflow", "0008_merge"),
]
operations = []
| Migration |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/utils.py | {
"start": 184,
"end": 4693
} | class ____(torch.nn.Module, metaclass=abc.ABCMeta):
"""Wrapper for quantized modules than can be lowered from reference modules."""
@classmethod
@abc.abstractmethod
def from_reference(cls, ref_module, output_scale, output_zero_point):
raise NotImplementedError
def _get_weight_observer(observer):
# FakeQuantize observer
if hasattr(observer, "activation_post_process"):
observer = observer.activation_post_process
# UniformQuantizationObserverBase observer
return observer
def _needs_weight_clamping(observer, dtype):
observer = _get_weight_observer(observer)
if dtype in [torch.qint8, torch.quint8, torch.qint32]:
info = torch.iinfo(dtype)
return observer.quant_min > info.min or observer.quant_max < info.max
return False
def _clamp_weights(qweight, observer, scale, zp):
if not _needs_weight_clamping(observer, qweight.dtype):
return qweight
observer = _get_weight_observer(observer)
min_, max_ = observer.quant_min, observer.quant_max
# Doing this because can't use torch.ops.quantized.clamp() with per_channel qscheme yet.
qw_int_max = torch.clone(qweight.int_repr()).fill_(max_)
qw_int_min = torch.clone(qweight.int_repr()).fill_(min_)
qw_int = torch.minimum(torch.maximum(qweight.int_repr(), qw_int_min), qw_int_max)
if observer.qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]:
qweight = torch._make_per_tensor_quantized_tensor(
qw_int, scale.item(), zp.item()
)
elif observer.qscheme in [
torch.per_channel_symmetric,
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
]:
qweight = torch._make_per_channel_quantized_tensor(
qw_int, scale, zp, axis=observer.ch_axis
)
else:
raise ValueError("Unexpected qscheme " + observer.qscheme)
return qweight
def _quantize_weight(float_wt, observer):
wt_scale, wt_zp = observer.calculate_qparams()
if observer.qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]:
qweight = torch.quantize_per_tensor(
float_wt, float(wt_scale), int(wt_zp), torch.qint8
)
qweight = _clamp_weights(qweight, observer, wt_scale, wt_zp)
elif observer.qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]:
wt_axis = observer.ch_axis
qweight = torch.quantize_per_channel(
float_wt,
wt_scale.to(torch.double),
wt_zp.to(torch.int64),
wt_axis,
torch.qint8,
)
qweight = _clamp_weights(qweight, observer, wt_scale, wt_zp)
elif observer.qscheme == torch.per_channel_affine_float_qparams:
qweight = torch.quantize_per_channel(
float_wt,
wt_scale.to(torch.float),
wt_zp.to(torch.float),
observer.ch_axis,
observer.dtype,
)
qweight = _clamp_weights(qweight, observer, wt_scale, wt_zp)
else:
raise ValueError("Unexpected qscheme " + observer.qscheme)
return qweight
def _ntuple_from_first(n):
"""Converts the argument to a tuple of size n
with the first element repeated."""
def parse(x):
while isinstance(x, collections.abc.Sequence):
if len(x) == n:
break
x = x[0]
return tuple(itertools.repeat(x, n))
return parse
def _hide_packed_params_repr(self, params):
# We don't want to show `PackedParams` children, hence custom
# `__repr__`. This is the same as nn.Module.__repr__, except the check
# for the `params module`.
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
for key, module in self._modules.items():
if isinstance(module, params):
continue
mod_str = repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append("(" + key + "): " + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + "("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
_pair_from_first = _ntuple_from_first(2)
| WeightedQuantizedModule |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1402878,
"end": 1403089
} | class ____(TimeUnit):
"""SingleTimeUnit schema wrapper."""
_schema = {"$ref": "#/definitions/SingleTimeUnit"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| SingleTimeUnit |
python | mwaskom__seaborn | seaborn/axisgrid.py | {
"start": 62164,
"end": 87560
} | class ____(_BaseGrid):
"""Grid for drawing a bivariate plot with marginal univariate plots.
Many plots can be drawn by using the figure-level interface :func:`jointplot`.
Use this class directly when you need more flexibility.
"""
def __init__(
self, data=None, *,
x=None, y=None, hue=None,
height=6, ratio=5, space=.2,
palette=None, hue_order=None, hue_norm=None,
dropna=False, xlim=None, ylim=None, marginal_ticks=False,
):
# Set up the subplot grid
f = plt.figure(figsize=(height, height))
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_joint = f.add_subplot(gs[1:, :-1])
ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)
self._figure = f
self.ax_joint = ax_joint
self.ax_marg_x = ax_marg_x
self.ax_marg_y = ax_marg_y
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)
plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)
# Turn off the ticks on the density axis for the marginal plots
if not marginal_ticks:
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)
plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Process the input variables
p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))
plot_data = p.plot_data.loc[:, p.plot_data.notna().any()]
# Possibly drop NA
if dropna:
plot_data = plot_data.dropna()
def get_var(var):
vector = plot_data.get(var, None)
if vector is not None:
vector = vector.rename(p.variables.get(var, None))
return vector
self.x = get_var("x")
self.y = get_var("y")
self.hue = get_var("hue")
for axis in "xy":
name = p.variables.get(axis, None)
if name is not None:
getattr(ax_joint, f"set_{axis}label")(name)
if xlim is not None:
ax_joint.set_xlim(xlim)
if ylim is not None:
ax_joint.set_ylim(ylim)
# Store the semantic mapping parameters for axes-level functions
self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)
# Make the grid look nice
utils.despine(f)
if not marginal_ticks:
utils.despine(ax=ax_marg_x, left=True)
utils.despine(ax=ax_marg_y, bottom=True)
for axes in [ax_marg_x, ax_marg_y]:
for axis in [axes.xaxis, axes.yaxis]:
axis.label.set_visible(False)
f.tight_layout()
f.subplots_adjust(hspace=space, wspace=space)
def _inject_kwargs(self, func, kws, params):
"""Add params to kws if they are accepted by func."""
func_params = signature(func).parameters
for key, val in params.items():
if key in func_params:
kws.setdefault(key, val)
def plot(self, joint_func, marginal_func, **kwargs):
"""Draw the plot by passing functions for joint and marginal axes.
This method passes the ``kwargs`` dictionary to both functions. If you
need more control, call :meth:`JointGrid.plot_joint` and
:meth:`JointGrid.plot_marginals` directly with specific parameters.
Parameters
----------
joint_func, marginal_func : callables
Functions to draw the bivariate and univariate plots. See methods
referenced above for information about the required characteristics
of these functions.
kwargs
Additional keyword arguments are passed to both functions.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
self.plot_marginals(marginal_func, **kwargs)
self.plot_joint(joint_func, **kwargs)
return self
def plot_joint(self, func, **kwargs):
"""Draw a bivariate plot on the joint axes of the grid.
Parameters
----------
func : plotting callable
If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
it must accept ``x`` and ``y`` vectors of data as the first two
positional arguments, and it must plot on the "current" axes.
If ``hue`` was defined in the class constructor, the function must
accept ``hue`` as a parameter.
kwargs
Keyword argument are passed to the plotting function.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = self.ax_joint
else:
plt.sca(self.ax_joint)
if self.hue is not None:
kwargs["hue"] = self.hue
self._inject_kwargs(func, kwargs, self._hue_params)
if str(func.__module__).startswith("seaborn"):
func(x=self.x, y=self.y, **kwargs)
else:
func(self.x, self.y, **kwargs)
return self
def plot_marginals(self, func, **kwargs):
"""Draw univariate plots on each marginal axes.
Parameters
----------
func : plotting callable
If a seaborn function, it should accept ``x`` and ``y`` and plot
when only one of them is defined. Otherwise, it must accept a vector
of data as the first positional argument and determine its orientation
using the ``vertical`` parameter, and it must plot on the "current" axes.
If ``hue`` was defined in the class constructor, it must accept ``hue``
as a parameter.
kwargs
Keyword argument are passed to the plotting function.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
seaborn_func = (
str(func.__module__).startswith("seaborn")
# deprecated distplot has a legacy API, special case it
and not func.__name__ == "distplot"
)
func_params = signature(func).parameters
kwargs = kwargs.copy()
if self.hue is not None:
kwargs["hue"] = self.hue
self._inject_kwargs(func, kwargs, self._hue_params)
if "legend" in func_params:
kwargs.setdefault("legend", False)
if "orientation" in func_params:
# e.g. plt.hist
orient_kw_x = {"orientation": "vertical"}
orient_kw_y = {"orientation": "horizontal"}
elif "vertical" in func_params:
# e.g. sns.distplot (also how did this get backwards?)
orient_kw_x = {"vertical": False}
orient_kw_y = {"vertical": True}
if seaborn_func:
func(x=self.x, ax=self.ax_marg_x, **kwargs)
else:
plt.sca(self.ax_marg_x)
func(self.x, **orient_kw_x, **kwargs)
if seaborn_func:
func(y=self.y, ax=self.ax_marg_y, **kwargs)
else:
plt.sca(self.ax_marg_y)
func(self.y, **orient_kw_y, **kwargs)
self.ax_marg_x.yaxis.get_label().set_visible(False)
self.ax_marg_y.xaxis.get_label().set_visible(False)
return self
def refline(
self, *, x=None, y=None, joint=True, marginal=True,
color='.5', linestyle='--', **line_kws
):
"""Add a reference line(s) to joint and/or marginal axes.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
joint, marginal : bools
Whether to add the reference line(s) to the joint/marginal axes.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s).
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
if joint:
self.ax_joint.axvline(x, **line_kws)
if marginal:
self.ax_marg_x.axvline(x, **line_kws)
if y is not None:
if joint:
self.ax_joint.axhline(y, **line_kws)
if marginal:
self.ax_marg_y.axhline(y, **line_kws)
return self
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
"""Set axis labels on the bivariate axes.
Parameters
----------
xlabel, ylabel : strings
Label names for the x and y variables.
kwargs : key, value mappings
Other keyword arguments are passed to the following functions:
- :meth:`matplotlib.axes.Axes.set_xlabel`
- :meth:`matplotlib.axes.Axes.set_ylabel`
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
self.ax_joint.set_xlabel(xlabel, **kwargs)
self.ax_joint.set_ylabel(ylabel, **kwargs)
return self
JointGrid.__init__.__doc__ = """\
Set up the grid of subplots and store data internally for easy plotting.
Parameters
----------
{params.core.data}
{params.core.xy}
height : number
Size of each side of the figure in inches (it will be square).
ratio : number
Ratio of joint axes height to marginal axes height.
space : number
Space between the joint and marginal axes
dropna : bool
If True, remove missing observations before plotting.
{{x, y}}lim : pairs of numbers
Set axis limits to these values before plotting.
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{params.core.hue}
Note: unlike in :class:`FacetGrid` or :class:`PairGrid`, the axes-level
functions must support ``hue`` to use it in :class:`JointGrid`.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
See Also
--------
{seealso.jointplot}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/JointGrid.rst
""".format(
params=_param_docs,
seealso=_core_docs["seealso"],
)
def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
variable in ``data`` will by shared across the y-axes across a single row and
the x-axes across a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
"""
# Avoid circular import
from .distributions import histplot, kdeplot
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
if not isinstance(data, pd.DataFrame):
raise TypeError(
f"'data' must be pandas DataFrame object, not: {type(data)}")
plot_kws = {} if plot_kws is None else plot_kws.copy()
diag_kws = {} if diag_kws is None else diag_kws.copy()
grid_kws = {} if grid_kws is None else grid_kws.copy()
# Resolve "auto" diag kind
if diag_kind == "auto":
if hue is None:
diag_kind = "kde" if kind == "kde" else "hist"
else:
diag_kind = "hist" if kind == "hist" else "kde"
# Set up the PairGrid
grid_kws.setdefault("diag_sharey", diag_kind == "hist")
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette, corner=corner,
height=height, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if kind == "reg":
# Needed until regplot supports style
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError("markers must be a singleton or a list of "
"markers for each level of the hue variable")
grid.hue_kws = {"marker": markers}
elif kind == "scatter":
if isinstance(markers, str):
plot_kws["marker"] = markers
elif hue is not None:
plot_kws["style"] = data[hue]
plot_kws["markers"] = markers
# Draw the marginal plots on the diagonal
diag_kws = diag_kws.copy()
diag_kws.setdefault("legend", False)
if diag_kind == "hist":
grid.map_diag(histplot, **diag_kws)
elif diag_kind == "kde":
diag_kws.setdefault("fill", True)
diag_kws.setdefault("warn_singular", False)
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
from .relational import scatterplot # Avoid circular import
plotter(scatterplot, **plot_kws)
elif kind == "reg":
from .regression import regplot # Avoid circular import
plotter(regplot, **plot_kws)
elif kind == "kde":
from .distributions import kdeplot # Avoid circular import
plot_kws.setdefault("warn_singular", False)
plotter(kdeplot, **plot_kws)
elif kind == "hist":
from .distributions import histplot # Avoid circular import
plotter(histplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
grid.tight_layout()
return grid
def jointplot(
data=None, *, x=None, y=None, hue=None, kind="scatter",
height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,
color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,
joint_kws=None, marginal_kws=None,
**kwargs
):
# Avoid circular imports
from .relational import scatterplot
from .regression import regplot, residplot
from .distributions import histplot, kdeplot, _freedman_diaconis_bins
if kwargs.pop("ax", None) is not None:
msg = "Ignoring `ax`; jointplot is a figure-level function."
warnings.warn(msg, UserWarning, stacklevel=2)
# Set up empty default kwarg dicts
joint_kws = {} if joint_kws is None else joint_kws.copy()
joint_kws.update(kwargs)
marginal_kws = {} if marginal_kws is None else marginal_kws.copy()
# Handle deprecations of distplot-specific kwargs
distplot_keys = [
"rug", "fit", "hist_kws", "norm_hist" "hist_kws", "rug_kws",
]
unused_keys = []
for key in distplot_keys:
if key in marginal_kws:
unused_keys.append(key)
marginal_kws.pop(key)
if unused_keys and kind != "kde":
msg = (
"The marginal plotting function has changed to `histplot`,"
" which does not accept the following argument(s): {}."
).format(", ".join(unused_keys))
warnings.warn(msg, UserWarning)
# Validate the plot kind
plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
_check_argument("kind", plot_kinds, kind)
# Raise early if using `hue` with a kind that does not support it
if hue is not None and kind in ["hex", "reg", "resid"]:
msg = f"Use of `hue` with `kind='{kind}'` is not currently supported."
raise ValueError(msg)
# Make a colormap based off the plot color
# (Currently used only for kind="hex")
if color is None:
color = "C0"
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=val) for val in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Matplotlib's hexbin plot is not na-robust
if kind == "hex":
dropna = True
# Initialize the JointGrid object
grid = JointGrid(
data=data, x=x, y=y, hue=hue,
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
dropna=dropna, height=height, ratio=ratio, space=space,
xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
)
if grid.hue is not None:
marginal_kws.setdefault("legend", False)
# Plot the data using the grid
if kind.startswith("scatter"):
joint_kws.setdefault("color", color)
grid.plot_joint(scatterplot, **joint_kws)
if grid.hue is None:
marg_func = histplot
else:
marg_func = kdeplot
marginal_kws.setdefault("warn_singular", False)
marginal_kws.setdefault("fill", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(marg_func, **marginal_kws)
elif kind.startswith("hist"):
# TODO process pair parameters for bins, etc. and pass
# to both joint and marginal plots
joint_kws.setdefault("color", color)
grid.plot_joint(histplot, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
marg_x_kws = marginal_kws.copy()
marg_y_kws = marginal_kws.copy()
pair_keys = "bins", "binwidth", "binrange"
for key in pair_keys:
if isinstance(joint_kws.get(key), tuple):
x_val, y_val = joint_kws[key]
marg_x_kws.setdefault(key, x_val)
marg_y_kws.setdefault(key, y_val)
histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)
elif kind.startswith("kde"):
joint_kws.setdefault("color", color)
joint_kws.setdefault("warn_singular", False)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("color", color)
if "fill" in joint_kws:
marginal_kws.setdefault("fill", joint_kws["fill"])
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = min(_freedman_diaconis_bins(grid.x), 50)
y_bins = min(_freedman_diaconis_bins(grid.y), 50)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(histplot, **marginal_kws)
elif kind.startswith("reg"):
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", True)
grid.plot_marginals(histplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)
# Make the main axes active in the matplotlib state machine
plt.sca(grid.ax_joint)
return grid
jointplot.__doc__ = """\
Draw a plot of two variables with bivariate and univariate graphs.
This function provides a convenient interface to the :class:`JointGrid`
class, with several canned plot kinds. This is intended to be a fairly
lightweight wrapper; if you need more flexibility, you should use
:class:`JointGrid` directly.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
kind : {{ "scatter" | "kde" | "hist" | "hex" | "reg" | "resid" }}
Kind of plot to draw. See the examples for references to the underlying functions.
height : numeric
Size of the figure (it will be square).
ratio : numeric
Ratio of joint axes height to marginal axes height.
space : numeric
Space between the joint and marginal axes
dropna : bool
If True, remove observations that are missing from ``x`` and ``y``.
{{x, y}}lim : pairs of numbers
Axis limits to set before plotting.
{params.core.color}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{{joint, marginal}}_kws : dicts
Additional keyword arguments for the plot components.
kwargs
Additional keyword arguments are passed to the function used to
draw the plot on the joint Axes, superseding items in the
``joint_kws`` dictionary.
Returns
-------
{returns.jointgrid}
See Also
--------
{seealso.jointgrid}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/jointplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
| JointGrid |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 5523,
"end": 5688
} | class ____(ArgumentError):
"""Raised when more than one foreign key matching can be located
between two selectables during a join."""
| AmbiguousForeignKeysError |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 290434,
"end": 292083
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"all_licensable_users_count",
"asset_packs",
"bandwidth_quota",
"bandwidth_usage",
"bandwidth_usage_percentage",
"storage_quota",
"storage_usage",
"storage_usage_percentage",
"total_available_licenses",
"total_licenses",
)
all_licensable_users_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="allLicensableUsersCount"
)
asset_packs = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="assetPacks"
)
bandwidth_quota = sgqlc.types.Field(
sgqlc.types.non_null(Float), graphql_name="bandwidthQuota"
)
bandwidth_usage = sgqlc.types.Field(
sgqlc.types.non_null(Float), graphql_name="bandwidthUsage"
)
bandwidth_usage_percentage = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="bandwidthUsagePercentage"
)
storage_quota = sgqlc.types.Field(
sgqlc.types.non_null(Float), graphql_name="storageQuota"
)
storage_usage = sgqlc.types.Field(
sgqlc.types.non_null(Float), graphql_name="storageUsage"
)
storage_usage_percentage = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="storageUsagePercentage"
)
total_available_licenses = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalAvailableLicenses"
)
total_licenses = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalLicenses"
)
| EnterpriseBillingInfo |
python | django__django | tests/schema/models.py | {
"start": 4942,
"end": 5136
} | class ____(models.Model):
node_id = models.AutoField(primary_key=True)
parent = models.ForeignKey("self", models.CASCADE, null=True, blank=True)
class Meta:
apps = new_apps
| Node |
python | huggingface__transformers | examples/pytorch/summarization/run_summarization.py | {
"start": 2652,
"end": 4832
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
resize_position_embeddings: Optional[bool] = field(
default=None,
metadata={
"help": (
"Whether to automatically resize the position embeddings if `max_source_length` exceeds "
"the model's position embeddings."
)
},
)
@dataclass
| ModelArguments |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 28551,
"end": 29314
} | class ____(
NamedTuple(
"_StartRunResult",
[
("success", bool),
("message", Optional[str]),
("serializable_error_info", Optional[SerializableErrorInfo]),
],
)
):
def __new__(
cls,
success: bool,
message: Optional[str],
serializable_error_info: Optional[SerializableErrorInfo],
):
return super().__new__(
cls,
success=check.bool_param(success, "success"),
message=check.opt_str_param(message, "message"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
@whitelist_for_serdes
| StartRunResult |
python | django__django | tests/serializers/models/data.py | {
"start": 1851,
"end": 1954
} | class ____(models.Model):
data = models.PositiveSmallIntegerField(null=True)
| PositiveSmallIntegerData |
python | RaRe-Technologies__gensim | gensim/utils.py | {
"start": 34225,
"end": 35019
} | class ____(SaveLoad):
"""Wrap a `corpus` and repeat it `n` times.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.utils import RepeatCorpusNTimes
>>>
>>> corpus = [[(1, 0.5)], []]
>>> list(RepeatCorpusNTimes(corpus, 3)) # repeat 3 times
[[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)], []]
"""
def __init__(self, corpus, n):
"""
Parameters
----------
corpus : iterable of iterable of (int, numeric)
Input corpus.
n : int
Number of repeats for corpus.
"""
self.corpus = corpus
self.n = n
def __iter__(self):
for _ in range(self.n):
for document in self.corpus:
yield document
| RepeatCorpusNTimes |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/jupyter_widget.py | {
"start": 24729,
"end": 24977
} | class ____(JupyterWidget):
"""Deprecated class; use JupyterWidget."""
def __init__(self, *a, **kw):
warn("IPythonWidget is deprecated; use JupyterWidget",
DeprecationWarning)
super().__init__(*a, **kw)
| IPythonWidget |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-qianfan/llama_index/llms/qianfan/base.py | {
"start": 1201,
"end": 1453
} | class ____(BaseModel):
"""
Chat request model.
"""
messages: List[ChatMsg]
"""Chat message list."""
system: str = ""
"""Prompt."""
stream: bool = False
"""Indicate whether to respond in stream or not."""
| ChatRequest |
python | google__jax | jax/_src/pallas/mosaic/interpret/interpret_pallas_call.py | {
"start": 31506,
"end": 31599
} | class ____(enum.Enum):
STARTED = 0
READ = 1
COMPLETED = 2
@dataclasses.dataclass
| DmaState |
python | Netflix__metaflow | metaflow/plugins/argo/argo_client.py | {
"start": 205,
"end": 288
} | class ____(MetaflowException):
headline = "Argo Client error"
| ArgoClientException |
python | gevent__gevent | src/gevent/tests/test__api.py | {
"start": 3774,
"end": 4550
} | class ____(greentest.TestCase):
def test_timer_fired(self):
lst = [1]
def func():
gevent.spawn_later(0.01, lst.pop)
gevent.sleep(0.02)
gevent.spawn(func)
# Func has not run yet
self.assertEqual(lst, [1])
# Run callbacks but don't yield.
gevent.sleep()
# Let timers fire. Func should be done.
gevent.sleep(0.1)
self.assertEqual(lst, [])
def test_spawn_is_not_cancelled(self):
lst = [1]
def func():
gevent.spawn(lst.pop)
# exiting immediately, but self.lst.pop must be called
gevent.spawn(func)
gevent.sleep(0.1)
self.assertEqual(lst, [])
if __name__ == '__main__':
greentest.main()
| TestTimers |
python | Pylons__pyramid | tests/test_authentication.py | {
"start": 140,
"end": 8943
} | class ____(unittest.TestCase):
def setUp(self):
from pyramid.interfaces import IDebugLogger
self.config = testing.setUp()
self.config.registry.registerUtility(self, IDebugLogger)
self.messages = []
def tearDown(self):
del self.config
def debug(self, msg):
self.messages.append(msg)
def _makeOne(self, userid=None, callback=None):
from pyramid.authentication import CallbackAuthenticationPolicy
class MyAuthenticationPolicy(CallbackAuthenticationPolicy):
def unauthenticated_userid(self, request):
return userid
policy = MyAuthenticationPolicy()
policy.debug = True
policy.callback = callback
return policy
def test_authenticated_userid_no_unauthenticated_userid(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne()
self.assertEqual(policy.authenticated_userid(request), None)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
'tests.test_authentication.MyAuthenticationPolicy.'
'authenticated_userid: call to unauthenticated_userid returned '
'None; returning None',
)
def test_authenticated_userid_no_callback(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne(userid='fred')
self.assertEqual(policy.authenticated_userid(request), 'fred')
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"authenticated_userid: there was no groupfinder callback; "
"returning 'fred'",
)
def test_authenticated_userid_with_callback_fail(self):
request = DummyRequest(registry=self.config.registry)
def callback(userid, request):
return None
policy = self._makeOne(userid='fred', callback=callback)
self.assertEqual(policy.authenticated_userid(request), None)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
'tests.test_authentication.MyAuthenticationPolicy.'
'authenticated_userid: groupfinder callback returned None; '
'returning None',
)
def test_authenticated_userid_with_callback_success(self):
request = DummyRequest(registry=self.config.registry)
def callback(userid, request):
return []
policy = self._makeOne(userid='fred', callback=callback)
self.assertEqual(policy.authenticated_userid(request), 'fred')
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"authenticated_userid: groupfinder callback returned []; "
"returning 'fred'",
)
def test_authenticated_userid_fails_cleaning_as_Authenticated(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne(userid='system.Authenticated')
self.assertEqual(policy.authenticated_userid(request), None)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"authenticated_userid: use of userid 'system.Authenticated' is "
"disallowed by any built-in Pyramid security policy, returning "
"None",
)
def test_authenticated_userid_fails_cleaning_as_Everyone(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne(userid='system.Everyone')
self.assertEqual(policy.authenticated_userid(request), None)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"authenticated_userid: use of userid 'system.Everyone' is "
"disallowed by any built-in Pyramid security policy, returning "
"None",
)
def test_effective_principals_no_unauthenticated_userid(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne()
self.assertEqual(
policy.effective_principals(request), ['system.Everyone']
)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"effective_principals: unauthenticated_userid returned None; "
"returning ['system.Everyone']",
)
def test_effective_principals_no_callback(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne(userid='fred')
self.assertEqual(
policy.effective_principals(request),
['system.Everyone', 'system.Authenticated', 'fred'],
)
self.assertEqual(len(self.messages), 2)
self.assertEqual(
self.messages[0],
'tests.test_authentication.MyAuthenticationPolicy.'
'effective_principals: groupfinder callback is None, so groups '
'is []',
)
self.assertEqual(
self.messages[1],
"tests.test_authentication.MyAuthenticationPolicy."
"effective_principals: returning effective principals: "
"['system.Everyone', 'system.Authenticated', 'fred']",
)
def test_effective_principals_with_callback_fail(self):
request = DummyRequest(registry=self.config.registry)
def callback(userid, request):
return None
policy = self._makeOne(userid='fred', callback=callback)
self.assertEqual(
policy.effective_principals(request), ['system.Everyone']
)
self.assertEqual(len(self.messages), 2)
self.assertEqual(
self.messages[0],
'tests.test_authentication.MyAuthenticationPolicy.'
'effective_principals: groupfinder callback returned None as '
'groups',
)
self.assertEqual(
self.messages[1],
"tests.test_authentication.MyAuthenticationPolicy."
"effective_principals: returning effective principals: "
"['system.Everyone']",
)
def test_effective_principals_with_callback_success(self):
request = DummyRequest(registry=self.config.registry)
def callback(userid, request):
return []
policy = self._makeOne(userid='fred', callback=callback)
self.assertEqual(
policy.effective_principals(request),
['system.Everyone', 'system.Authenticated', 'fred'],
)
self.assertEqual(len(self.messages), 2)
self.assertEqual(
self.messages[0],
'tests.test_authentication.MyAuthenticationPolicy.'
'effective_principals: groupfinder callback returned [] as groups',
)
self.assertEqual(
self.messages[1],
"tests.test_authentication.MyAuthenticationPolicy."
"effective_principals: returning effective principals: "
"['system.Everyone', 'system.Authenticated', 'fred']",
)
def test_effective_principals_with_unclean_principal_Authenticated(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne(userid='system.Authenticated')
self.assertEqual(
policy.effective_principals(request), ['system.Everyone']
)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"effective_principals: unauthenticated_userid returned disallowed "
"'system.Authenticated'; returning ['system.Everyone'] as if it "
"was None",
)
def test_effective_principals_with_unclean_principal_Everyone(self):
request = DummyRequest(registry=self.config.registry)
policy = self._makeOne(userid='system.Everyone')
self.assertEqual(
policy.effective_principals(request), ['system.Everyone']
)
self.assertEqual(len(self.messages), 1)
self.assertEqual(
self.messages[0],
"tests.test_authentication.MyAuthenticationPolicy."
"effective_principals: unauthenticated_userid returned disallowed "
"'system.Everyone'; returning ['system.Everyone'] as if it "
"was None",
)
| TestCallbackAuthenticationPolicyDebugging |
python | bokeh__bokeh | src/bokeh/application/handlers/lifecycle.py | {
"start": 1503,
"end": 4735
} | class ____(Handler):
''' Load a script which contains server lifecycle callbacks.
'''
_on_server_loaded: Callable[[ServerContext], None]
_on_server_unloaded: Callable[[ServerContext], None]
_on_session_created: Callable[[SessionContext], None]
_on_session_destroyed: Callable[[SessionContext], None]
def __init__(self) -> None:
super().__init__()
self._on_server_loaded = _do_nothing
self._on_server_unloaded = _do_nothing
self._on_session_created = _do_nothing
self._on_session_destroyed = _do_nothing
@property
def safe_to_fork(self) -> bool:
return True
# Public methods ----------------------------------------------------------
def modify_document(self, doc: Document) -> None:
''' This handler does not make any modifications to the Document.
Args:
doc (Document) : A Bokeh Document to update in-place
*This handler does not modify the document*
Returns:
None
'''
# we could support a modify_document function, might be weird though.
pass
def on_server_loaded(self, server_context: ServerContext) -> None:
''' Execute `on_server_unloaded`` from the configured module (if
it is defined) when the server is first started.
Args:
server_context (ServerContext) :
'''
return self._on_server_loaded(server_context)
def on_server_unloaded(self, server_context: ServerContext) -> None:
''' Execute ``on_server_unloaded`` from the configured module (if
it is defined) when the server cleanly exits. (Before stopping the
server's ``IOLoop``.)
Args:
server_context (ServerContext) :
.. warning::
In practice this code may not run, since servers are often killed
by a signal.
'''
return self._on_server_unloaded(server_context)
async def on_session_created(self, session_context: SessionContext) -> None:
''' Execute ``on_session_created`` from the configured module (if
it is defined) when a new session is created.
Args:
session_context (SessionContext) :
'''
return self._on_session_created(session_context)
async def on_session_destroyed(self, session_context: SessionContext) -> None:
''' Execute ``on_session_destroyed`` from the configured module (if
it is defined) when a new session is destroyed.
Args:
session_context (SessionContext) :
'''
return self._on_session_destroyed(session_context)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _do_nothing(ignored: Any) -> None:
pass
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| LifecycleHandler |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_ignored_modules.py | {
"start": 1965,
"end": 2222
} | class ____(torch.nn.Module):
def __init__(self, in_dim: int, out_dim: int) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.randn((in_dim, out_dim)))
def forward(self, x):
return x @ self.weight
| IgnoredModule |
python | ray-project__ray | python/ray/tests/chaos/potato_passer.py | {
"start": 486,
"end": 2218
} | class ____:
def __init__(self, name, next_name, sleep_secs):
self.count = 0
self.name = name
self.next_name = next_name
self.sleep_secs = sleep_secs
self.print_every = 100
async def pass_potato(self, potato: int, target: int):
self.count += 1
if potato % self.print_every == 0:
print(
f"running, name {self.name}, count {self.count}, "
f"potato {potato}, target {target}"
)
if potato >= target:
print(f"target reached! name = {self.name}, count = {self.count}")
return target
next_actor = ray.get_actor(self.next_name)
await asyncio.sleep(self.sleep_secs)
return await next_actor.pass_potato.remote(potato + 1, target)
async def main():
parser = argparse.ArgumentParser()
parser.add_argument("--num-actors", type=int, help="Make this many actors")
parser.add_argument("--pass-times", type=int, help="Pass this many messages")
parser.add_argument(
"--sleep-secs",
type=float,
help="Sleep seconds before sending message to next actor",
)
args = parser.parse_args()
actors = []
for i in range(args.num_actors):
this_actor = "actor" + str(i)
next_actor = "actor" + str((i + 1) % args.num_actors)
actor = PotatoPasser.options(
name=this_actor, scheduling_strategy="SPREAD"
).remote(this_actor, next_actor, args.sleep_secs)
actors.append(actor)
ret = await actors[0].pass_potato.remote(0, args.pass_times)
print(f"passed potato {ret} times! expected {args.pass_times} times.")
assert ret == args.pass_times
asyncio.run(main())
| PotatoPasser |
python | dagster-io__dagster | python_modules/libraries/dagster-omni/dagster_omni/objects.py | {
"start": 2483,
"end": 2914
} | class ____:
"""Represents the essential query configuration needed for asset creation."""
table: str
fields: list[str]
@classmethod
def from_json(cls, data: dict[str, Any]) -> "OmniQueryConfig":
"""Create OmniQueryConfig from JSON query configuration data."""
return cls(
table=data["table"],
fields=data["fields"],
)
@whitelist_for_serdes
@record
| OmniQueryConfig |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 643717,
"end": 644231
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("id", "viewer_can_subscribe", "viewer_subscription")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
viewer_can_subscribe = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanSubscribe"
)
viewer_subscription = sgqlc.types.Field(
SubscriptionState, graphql_name="viewerSubscription"
)
| Subscribable |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 24948,
"end": 25212
} | class ____(DeviceTypeTestBase):
device_type = "hpu"
primary_device: ClassVar[str]
@classmethod
def get_primary_device(cls):
return cls.primary_device
@classmethod
def setUpClass(cls):
cls.primary_device = "hpu:0"
| HPUTestBase |
python | huggingface__transformers | src/transformers/models/siglip/modeling_siglip.py | {
"start": 19859,
"end": 22150
} | class ____(nn.Module):
def __init__(self, config: SiglipTextConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = SiglipTextEmbeddings(config)
self.encoder = SiglipEncoder(config)
self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.head = nn.Linear(embed_dim, config.projection_size)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
if input_ids is None:
raise ValueError("You have to specify input_ids")
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
# note: SigLIP's text model does not use a causal mask, unlike the original CLIP model.
# expand attention_mask
uses_flash_attention = "flash" in self.config._attn_implementation
if uses_flash_attention:
attention_mask = None
elif attention_mask is not None and not uses_flash_attention:
# [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.final_layer_norm(last_hidden_state)
# The model uses the last token's hidden state, which may be padding.
pooled_output = last_hidden_state[:, -1, :]
pooled_output = self.head(pooled_output)
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
)
@auto_docstring(
custom_intro="""
The text model from SigLIP without any head or projection on top.
"""
)
| SiglipTextTransformer |
python | astropy__astropy | astropy/timeseries/periodograms/base.py | {
"start": 170,
"end": 1966
} | class ____:
@abc.abstractmethod
def __init__(self, t, y, dy=None):
pass
@classmethod
def from_timeseries(
cls, timeseries, signal_column_name=None, uncertainty=None, **kwargs
):
"""
Initialize a periodogram from a time series object.
If a binned time series is passed, the time at the center of the bins is
used. Also note that this method automatically gets rid of NaN/undefined
values when initializing the periodogram.
Parameters
----------
signal_column_name : str
The name of the column containing the signal values to use.
uncertainty : str or float or `~astropy.units.Quantity`, optional
The name of the column containing the errors on the signal, or the
value to use for the error, if a scalar.
**kwargs
Additional keyword arguments are passed to the initializer for this
periodogram class.
"""
if signal_column_name is None:
raise ValueError("signal_column_name should be set to a valid column name")
y = timeseries[signal_column_name]
keep = ~np.isnan(y)
if isinstance(uncertainty, str):
dy = timeseries[uncertainty]
keep &= ~np.isnan(dy)
dy = dy[keep]
else:
dy = uncertainty
if isinstance(timeseries, TimeSeries):
time = timeseries.time
elif isinstance(timeseries, BinnedTimeSeries):
time = timeseries.time_bin_center
else:
raise TypeError(
"Input time series should be an instance of "
"TimeSeries or BinnedTimeSeries"
)
return cls(time[keep], y[keep], dy=dy, **kwargs)
| BasePeriodogram |
python | charlax__professional-programming | antipatterns/python-examples/reraise_exceptions_bad.py | {
"start": 75,
"end": 344
} | class ____(Exception):
pass
def toast(bread):
try:
put_in_toaster(bread)
except:
raise ToastException("Could not toast bread")
def put_in_toaster(bread):
brad.color = "light_brown" # Note the typo
toast(Bread("yellow"))
| ToastException |
python | PrefectHQ__prefect | tests/server/models/test_deployments.py | {
"start": 18141,
"end": 25376
} | class ____:
@pytest.fixture
async def deployment_id_1(self):
return uuid4()
@pytest.fixture
async def deployment_id_2(self):
return uuid4()
@pytest.fixture
async def deployment_id_3(self):
return uuid4()
@pytest.fixture
async def filter_data(
self,
session,
flow,
flow_function,
deployment_id_1,
deployment_id_2,
deployment_id_3,
):
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
id=deployment_id_1,
name="My Deployment",
flow_id=flow.id,
paused=False,
),
)
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
id=deployment_id_2,
name="Another Deployment",
flow_id=flow.id,
tags=["tb12"],
paused=False,
),
)
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
id=deployment_id_3,
name="Yet Another Deployment",
flow_id=flow.id,
tags=["tb12", "goat"],
paused=True,
),
)
async def test_read_deployments(self, filter_data, session):
read_deployments = await models.deployments.read_deployments(session=session)
assert len(read_deployments) == 3
async def test_read_deployments_applies_limit(self, filter_data, session):
read_deployments = await models.deployments.read_deployments(
session=session, limit=1
)
assert len(read_deployments) == 1
async def test_read_deployments_applies_offset(
self, deployment_id_1, filter_data, session
):
read_deployments = await models.deployments.read_deployments(
session=session, offset=1, limit=1
)
# sorts by name by default
assert {deployment.id for deployment in read_deployments} == {deployment_id_1}
async def test_read_deployments_returns_empty_list(self, session):
read_deployments = await models.deployments.read_deployments(session=session)
assert len(read_deployments) == 0
async def test_read_deployment_filters_by_id(
self, filter_data, deployment_id_1, session
):
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
id=filters.DeploymentFilterId(any_=[deployment_id_1]),
),
)
assert {res.id for res in result} == {deployment_id_1}
async def test_read_deployment_filters_by_name(
self, filter_data, deployment_id_2, session
):
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
name=filters.DeploymentFilterName(any_=["Another Deployment"]),
),
)
assert {res.id for res in result} == {deployment_id_2}
async def test_read_deployment_filters_by_paused(
self, filter_data, deployment_id_3, session
):
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
paused=filters.DeploymentFilterPaused(eq_=True)
),
)
assert {res.id for res in result} == {deployment_id_3}
async def test_read_deployment_filters_filters_by_tags(
self, filter_data, deployment_id_1, deployment_id_3, session
):
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
tags=filters.DeploymentFilterTags(all_=["goat"])
),
)
assert {res.id for res in result} == {deployment_id_3}
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
tags=filters.DeploymentFilterTags(is_null_=True)
),
)
assert {res.id for res in result} == {deployment_id_1}
async def test_read_deployment_filters_filters_by_flow_criteria(
self, filter_data, flow, deployment_id_3, session
):
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
tags=filters.DeploymentFilterTags(all_=["goat"])
),
flow_filter=filters.FlowFilter(id=filters.FlowFilterId(any_=[flow.id])),
)
assert {res.id for res in result} == {deployment_id_3}
result = await models.deployments.read_deployments(
session=session,
deployment_filter=filters.DeploymentFilter(
tags=filters.DeploymentFilterTags(all_=["goat"])
),
flow_filter=filters.FlowFilter(id=filters.FlowFilterId(any_=[uuid4()])),
)
assert len(result) == 0
async def test_read_deployment_filters_filters_by_flow_run_criteria(
self, filter_data, flow, deployment_id_3, session
):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, deployment_id=deployment_id_3
),
)
result = await models.deployments.read_deployments(
session=session,
flow_run_filter=filters.FlowRunFilter(
id=filters.FlowRunFilterId(any_=[flow_run.id])
),
)
assert {res.id for res in result} == {deployment_id_3}
result = await models.deployments.read_deployments(
session=session,
flow_run_filter=filters.FlowRunFilter(
id=filters.FlowRunFilterId(any_=[uuid4()])
),
)
assert len(result) == 0
async def test_read_deployment_filters_filters_by_task_run_criteria(
self, filter_data, flow, deployment_id_3, session
):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id, deployment_id=deployment_id_3
),
)
task_run = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=flow_run.id, task_key="my-task", dynamic_key="0"
),
)
result = await models.deployments.read_deployments(
session=session,
task_run_filter=schemas.filters.TaskRunFilter(id=dict(any_=[task_run.id])),
)
assert {res.id for res in result} == {deployment_id_3}
result = await models.deployments.read_deployments(
session=session,
task_run_filter=schemas.filters.TaskRunFilter(id=dict(any_=[uuid4()])),
)
assert len(result) == 0
| TestReadDeployments |
python | kubernetes-client__python | kubernetes/client/models/v1_cron_job.py | {
"start": 383,
"end": 7106
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1CronJobSpec',
'status': 'V1CronJobStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1CronJob - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1CronJob. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1CronJob. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1CronJob.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1CronJob. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1CronJob. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1CronJob. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1CronJob.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1CronJob. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1CronJob. # noqa: E501
:return: The metadata of this V1CronJob. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1CronJob.
:param metadata: The metadata of this V1CronJob. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1CronJob. # noqa: E501
:return: The spec of this V1CronJob. # noqa: E501
:rtype: V1CronJobSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1CronJob.
:param spec: The spec of this V1CronJob. # noqa: E501
:type: V1CronJobSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1CronJob. # noqa: E501
:return: The status of this V1CronJob. # noqa: E501
:rtype: V1CronJobStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1CronJob.
:param status: The status of this V1CronJob. # noqa: E501
:type: V1CronJobStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CronJob):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CronJob):
return True
return self.to_dict() != other.to_dict()
| V1CronJob |
python | huggingface__transformers | src/transformers/data/data_collator.py | {
"start": 17931,
"end": 21111
class ____(DataCollatorMixin):
    """
    Data collator that dynamically pads a batch of nested examples for multiple choice, so that all choices
    of all examples have the same length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences according to the model's padding side and padding index
            among:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            Pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", or "pt".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: bool | str | PaddingStrategy = True
    max_length: int | None = None
    pad_to_multiple_of: int | None = None
    return_tensors: str = "pt"

    def torch_call(self, examples: list[dict[str, Any]]):  # Refactored implementation from the docs.
        import torch

        # Take labels out of the examples beforehand, because they aren't nested.
        label_name = "label" if "label" in examples[0] else "labels"
        labels = [example.pop(label_name) for example in examples]
        batch_size = len(examples)
        num_choices = len(examples[0]["input_ids"])
        # Go from e.g. 2 examples of 2 choices [{input_ids: [[1], [2]]}, {input_ids: [[3], [4]]}]
        # to 4 examples [{input_ids: [1]}, {input_ids: [2]}, {input_ids: [3]}, {input_ids: [4]}].
        # A flat comprehension avoids the quadratic list concatenation of
        # `sum(..., start=[])`, which re-copies the accumulator for every example.
        flat_examples = [
            {k: v[i] for k, v in example.items()}
            for example in examples
            for i in range(num_choices)
        ]
        # Pad all choices of all examples as if you're padding any other batch of examples.
        batch = self.tokenizer.pad(
            flat_examples,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Reshape from B*C x L into B x C x L, and add the labels back in.
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
@dataclass
| DataCollatorForMultipleChoice |
python | great-expectations__great_expectations | great_expectations/data_context/types/resource_identifiers.py | {
"start": 2795,
"end": 3058
class ____(Schema):
    """Marshmallow schema that (de)serializes ``BatchIdentifier`` objects."""

    batch_identifier = fields.Str()
    data_asset_name = fields.Str()

    # noinspection PyUnusedLocal
    @post_load
    def make_batch_identifier(self, data, **kwargs):
        """Rehydrate the loaded field dict into a ``BatchIdentifier``."""
        identifier = BatchIdentifier(**data)
        return identifier
@public_api
| BatchIdentifierSchema |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 31363,
"end": 37661
class ____:
    """
    Note, task retry delays are tested in `test_engine` because we need to mock the
    sleep call which requires a task run id before the task is called.

    These tests observe retry behavior end-to-end: each runs a deliberately
    flaky task inside a flow, then asserts on call counts and on the persisted
    sequence of task-run states.
    """

    @pytest.mark.parametrize("always_fail", [True, False])
    async def test_task_respects_retry_count(
        self, always_fail, prefect_client, events_pipeline
    ):
        # `mock` counts invocations so we can assert exactly how many attempts ran.
        mock = MagicMock()
        exc = ValueError()

        @task(retries=3)
        def flaky_function():
            mock()
            # 3 retries means 4 attempts
            # Succeed on the final retry unless we're ending in a failure
            if not always_fail and mock.call_count == 4:
                return True
            raise exc

        @flow
        def test_flow():
            future = flaky_function.submit()
            future.wait()
            return future.state, ...

        task_run_state, _ = test_flow()
        task_run_id = task_run_state.state_details.task_run_id

        if always_fail:
            assert task_run_state.is_failed()
            assert exceptions_equal(
                await task_run_state.result(raise_on_failure=False), exc
            )
            assert mock.call_count == 4
        else:
            assert task_run_state.is_completed()
            assert await task_run_state.result() is True
            assert mock.call_count == 4

        # Flush events so the state history below is visible to the API client.
        await events_pipeline.process_events()
        states = await prefect_client.read_task_run_states(task_run_id)
        state_names = [state.name for state in states]
        # task retries are client-side in the new engine
        assert state_names == [
            "Pending",
            "Running",
            "Retrying",
            "Retrying",
            "Retrying",
            "Failed" if always_fail else "Completed",
        ]

    async def test_task_only_uses_necessary_retries(
        self, prefect_client, events_pipeline
    ):
        mock = MagicMock()
        exc = ValueError()

        @task(retries=3)
        def flaky_function():
            mock()
            # Succeeds on the second attempt; the remaining retries must not run.
            if mock.call_count == 2:
                return True
            raise exc

        @flow
        def test_flow():
            future = flaky_function.submit()
            future.wait()
            return future.state

        task_run_state = test_flow()
        task_run_id = task_run_state.state_details.task_run_id

        assert task_run_state.is_completed()
        assert await task_run_state.result() is True
        assert mock.call_count == 2

        await events_pipeline.process_events()
        states = await prefect_client.read_task_run_states(task_run_id)
        state_names = [state.name for state in states]
        # task retries are client side in the new engine
        assert state_names == [
            "Pending",
            "Running",
            "Retrying",
            "Completed",
        ]

    async def test_task_retries_receive_latest_task_run_in_context(
        self, events_pipeline
    ):
        # Captured per-attempt observations of the task-run context.
        state_names: List[str] = []
        run_counts = []
        start_times = []

        # Added retry_delay_seconds as a regression check for https://github.com/PrefectHQ/prefect/issues/15422
        @task(retries=3, retry_delay_seconds=1)
        def flaky_function():
            ctx = TaskRunContext.get()
            state_names.append(ctx.task_run.state_name)
            run_counts.append(ctx.task_run.run_count)
            start_times.append(ctx.start_time)
            raise ValueError()

        @flow
        def test_flow():
            flaky_function()

        with pytest.raises(ValueError):
            test_flow()

        expected_state_names = [
            "Running",
            "Retrying",
            "Retrying",
            "Retrying",
        ]
        assert len(state_names) == len(expected_state_names) == len(run_counts)
        for i in range(len(state_names)):
            # run_count increments with each attempt and the context state name
            # reflects the attempt's state at call time.
            assert run_counts[i] == i + 1
            assert state_names[i] == expected_state_names[i]
            if i > 0:
                last_start_time = start_times[i - 1]
                assert last_start_time < start_times[i], (
                    "Timestamps should be increasing"
                )

    async def test_global_task_retry_config(self):
        # The default-retries setting applies to tasks declared without `retries`.
        with temporary_settings(updates={PREFECT_TASK_DEFAULT_RETRIES: "1"}):
            mock = MagicMock()
            exc = ValueError()

            @task()
            def flaky_function():
                mock()
                if mock.call_count == 2:
                    return True
                raise exc

            @flow
            def test_flow():
                future = flaky_function.submit()
                return future.wait()

            test_flow()
            assert mock.call_count == 2

    @pytest.mark.parametrize(
        ("retries_configured", "expected_log_fragment"),
        [
            (0, None),  # No retry-specific message when no retries configured
            (1, "Retries are exhausted"),
        ],
    )
    async def test_task_retry_logging(
        self,
        caplog: pytest.LogCaptureFixture,
        retries_configured: int,
        expected_log_fragment: Optional[str],
    ):
        caplog.set_level(logging.ERROR, logger="prefect.task_engine")
        exc = ValueError("Test Exception")

        @task(retries=retries_configured)
        def failing_task():
            raise exc

        @flow
        def test_flow():
            try:
                failing_task()
            except ValueError:
                pass  # Expected

        test_flow()

        found_error_message = False
        for record in caplog.records:
            if record.levelname == "ERROR" and str(exc) in record.message:
                found_error_message = True
                # Check for expected retry message only if retries are configured
                if expected_log_fragment:
                    assert expected_log_fragment in record.message
                else:
                    # When no retries configured, ensure no retry suffix is added
                    assert "Retries are exhausted" not in record.message
                    assert "No retries configured" not in record.message
                break
        assert found_error_message, "Expected error log message not found."
| TestTaskRetries |
python | openai__openai-python | src/openai/types/chat/completion_create_params.py | {
"start": 1593,
"end": 14442
class ____(TypedDict, total=False):
    """Keyword arguments shared by streaming and non-streaming chat-completion
    creation requests.

    Only ``messages`` and ``model`` are required; every other key is optional
    request configuration documented on the field itself.
    """

    messages: Required[Iterable[ChatCompletionMessageParam]]
    """A list of messages comprising the conversation so far.

    Depending on the [model](https://platform.openai.com/docs/models) you use,
    different message types (modalities) are supported, like
    [text](https://platform.openai.com/docs/guides/text-generation),
    [images](https://platform.openai.com/docs/guides/vision), and
    [audio](https://platform.openai.com/docs/guides/audio).
    """

    model: Required[Union[str, ChatModel]]
    """Model ID used to generate the response, like `gpt-4o` or `o3`.

    OpenAI offers a wide range of models with different capabilities, performance
    characteristics, and price points. Refer to the
    [model guide](https://platform.openai.com/docs/models) to browse and compare
    available models.
    """

    audio: Optional[ChatCompletionAudioParam]
    """Parameters for audio output.

    Required when audio output is requested with `modalities: ["audio"]`.
    [Learn more](https://platform.openai.com/docs/guides/audio).
    """

    frequency_penalty: Optional[float]
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on their existing frequency in the
    text so far, decreasing the model's likelihood to repeat the same line verbatim.
    """

    function_call: FunctionCall
    """Deprecated in favor of `tool_choice`.

    Controls which (if any) function is called by the model.

    `none` means the model will not call a function and instead generates a message.

    `auto` means the model can pick between generating a message or calling a
    function.

    Specifying a particular function via `{"name": "my_function"}` forces the model
    to call that function.

    `none` is the default when no functions are present. `auto` is the default if
    functions are present.
    """

    functions: Iterable[Function]
    """Deprecated in favor of `tools`.

    A list of functions the model may generate JSON inputs for.
    """

    logit_bias: Optional[Dict[str, int]]
    """Modify the likelihood of specified tokens appearing in the completion.

    Accepts a JSON object that maps tokens (specified by their token ID in the
    tokenizer) to an associated bias value from -100 to 100. Mathematically, the
    bias is added to the logits generated by the model prior to sampling. The exact
    effect will vary per model, but values between -1 and 1 should decrease or
    increase likelihood of selection; values like -100 or 100 should result in a ban
    or exclusive selection of the relevant token.
    """

    logprobs: Optional[bool]
    """Whether to return log probabilities of the output tokens or not.

    If true, returns the log probabilities of each output token returned in the
    `content` of `message`.
    """

    max_completion_tokens: Optional[int]
    """
    An upper bound for the number of tokens that can be generated for a completion,
    including visible output tokens and
    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    """

    max_tokens: Optional[int]
    """
    The maximum number of [tokens](/tokenizer) that can be generated in the chat
    completion. This value can be used to control
    [costs](https://openai.com/api/pricing/) for text generated via API.

    This value is now deprecated in favor of `max_completion_tokens`, and is not
    compatible with
    [o-series models](https://platform.openai.com/docs/guides/reasoning).
    """

    metadata: Optional[Metadata]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """

    modalities: Optional[List[Literal["text", "audio"]]]
    """
    Output types that you would like the model to generate. Most models are capable
    of generating text, which is the default:

    `["text"]`

    The `gpt-4o-audio-preview` model can also be used to
    [generate audio](https://platform.openai.com/docs/guides/audio). To request that
    this model generate both text and audio responses, you can use:

    `["text", "audio"]`
    """

    n: Optional[int]
    """How many chat completion choices to generate for each input message.

    Note that you will be charged based on the number of generated tokens across all
    of the choices. Keep `n` as `1` to minimize costs.
    """

    parallel_tool_calls: bool
    """
    Whether to enable
    [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
    during tool use.
    """

    prediction: Optional[ChatCompletionPredictionContentParam]
    """
    Static predicted output content, such as the content of a text file that is
    being regenerated.
    """

    presence_penalty: Optional[float]
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on whether they appear in the text so
    far, increasing the model's likelihood to talk about new topics.
    """

    prompt_cache_key: str
    """
    Used by OpenAI to cache responses for similar requests to optimize your cache
    hit rates. Replaces the `user` field.
    [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    """

    prompt_cache_retention: Optional[Literal["in-memory", "24h"]]
    """The retention policy for the prompt cache.

    Set to `24h` to enable extended prompt caching, which keeps cached prefixes
    active for longer, up to a maximum of 24 hours.
    [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    """

    reasoning_effort: Optional[ReasoningEffort]
    """
    Constrains effort on reasoning for
    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
    supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
    reasoning effort can result in faster responses and fewer tokens used on
    reasoning in a response.

    - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
      reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
      calls are supported for all reasoning values in gpt-5.1.
    - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
      support `none`.
    - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
    """

    response_format: ResponseFormat
    """An object specifying the format that the model must output.

    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
    Outputs which ensures the model will match your supplied JSON schema. Learn more
    in the
    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    ensures the message the model generates is valid JSON. Using `json_schema` is
    preferred for models that support it.
    """

    safety_identifier: str
    """
    A stable identifier used to help detect users of your application that may be
    violating OpenAI's usage policies. The IDs should be a string that uniquely
    identifies each user. We recommend hashing their username or email address, in
    order to avoid sending us any identifying information.
    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    """

    seed: Optional[int]
    """
    This feature is in Beta. If specified, our system will make a best effort to
    sample deterministically, such that repeated requests with the same `seed` and
    parameters should return the same result. Determinism is not guaranteed, and you
    should refer to the `system_fingerprint` response parameter to monitor changes
    in the backend.
    """

    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]]
    """Specifies the processing type used for serving the request.

    - If set to 'auto', then the request will be processed with the service tier
      configured in the Project settings. Unless otherwise configured, the Project
      will use 'default'.
    - If set to 'default', then the request will be processed with the standard
      pricing and performance for the selected model.
    - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      '[priority](https://openai.com/api-priority-processing/)', then the request
      will be processed with the corresponding service tier.
    - When not set, the default behavior is 'auto'.

    When the `service_tier` parameter is set, the response body will include the
    `service_tier` value based on the processing mode actually used to serve the
    request. This response value may be different from the value set in the
    parameter.
    """

    stop: Union[Optional[str], SequenceNotStr[str], None]
    """Not supported with latest reasoning models `o3` and `o4-mini`.

    Up to 4 sequences where the API will stop generating further tokens. The
    returned text will not contain the stop sequence.
    """

    store: Optional[bool]
    """
    Whether or not to store the output of this chat completion request for use in
    our [model distillation](https://platform.openai.com/docs/guides/distillation)
    or [evals](https://platform.openai.com/docs/guides/evals) products.

    Supports text and image inputs. Note: image inputs over 8MB will be dropped.
    """

    stream_options: Optional[ChatCompletionStreamOptionsParam]
    """Options for streaming response. Only set this when you set `stream: true`."""

    temperature: Optional[float]
    """What sampling temperature to use, between 0 and 2.

    Higher values like 0.8 will make the output more random, while lower values like
    0.2 will make it more focused and deterministic. We generally recommend altering
    this or `top_p` but not both.
    """

    tool_choice: ChatCompletionToolChoiceOptionParam
    """
    Controls which (if any) tool is called by the model. `none` means the model will
    not call any tool and instead generates a message. `auto` means the model can
    pick between generating a message or calling one or more tools. `required` means
    the model must call one or more tools. Specifying a particular tool via
    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
    call that tool.

    `none` is the default when no tools are present. `auto` is the default if tools
    are present.
    """

    tools: Iterable[ChatCompletionToolUnionParam]
    """A list of tools the model may call.

    You can provide either
    [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
    or [function tools](https://platform.openai.com/docs/guides/function-calling).
    """

    top_logprobs: Optional[int]
    """
    An integer between 0 and 20 specifying the number of most likely tokens to
    return at each token position, each with an associated log probability.
    `logprobs` must be set to `true` if this parameter is used.
    """

    top_p: Optional[float]
    """
    An alternative to sampling with temperature, called nucleus sampling, where the
    model considers the results of the tokens with top_p probability mass. So 0.1
    means only the tokens comprising the top 10% probability mass are considered.

    We generally recommend altering this or `temperature` but not both.
    """

    user: str
    """This field is being replaced by `safety_identifier` and `prompt_cache_key`.

    Use `prompt_cache_key` instead to maintain caching optimizations. A stable
    identifier for your end-users. Used to boost cache hit rates by better bucketing
    similar requests and to help OpenAI detect and prevent abuse.
    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    """

    verbosity: Optional[Literal["low", "medium", "high"]]
    """Constrains the verbosity of the model's response.

    Lower values will result in more concise responses, while higher values will
    result in more verbose responses. Currently supported values are `low`,
    `medium`, and `high`.
    """

    web_search_options: WebSearchOptions
    """
    This tool searches the web for relevant results to use in a response. Learn more
    about the
    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
    """
FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam]
| CompletionCreateParamsBase |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_bootstrapped_ks_test_p_value.py | {
"start": 541,
"end": 5052
class ____(ColumnAggregateMetricProvider):
    """MetricProvider for the bootstrapped Kolmogorov-Smirnov test p-value.

    Repeatedly draws bootstrap samples from the column and KS-tests each sample
    against the CDF implied by ``partition_object``; the metric's observed value
    is the fraction of bootstrap samples whose KS p-value is at least ``p``.
    """

    metric_name = "column.bootstrapped_ks_test_p_value"
    # NOTE: these must match the keyword names accepted by the implementations
    # below. The previous tuple listed "bootstrap_sample", which never matched
    # the "bootstrap_samples" kwarg and silently dropped the configured value.
    value_keys = ("partition_object", "p", "bootstrap_samples", "bootstrap_sample_size")

    @column_aggregate_value(engine=PandasExecutionEngine)
    def _pandas(  # noqa: C901 # FIXME CoP
        cls,
        column,
        partition_object=None,
        p=0.05,
        bootstrap_samples=None,
        bootstrap_sample_size=None,
        **kwargs,
    ):
        """Compute the bootstrapped KS-test metric for a pandas column.

        Raises:
            ValueError: If ``partition_object`` is not a valid continuous
                partition, has infinite endpoints, or carries tail weights.
        """
        if not is_valid_continuous_partition_object(partition_object):
            raise ValueError("Invalid continuous partition object.")  # noqa: TRY003 # FIXME CoP
        # TODO: consider changing this into a check that tail_weights does not exist exclusively, by moving this check into is_valid_continuous_partition_object  # noqa: E501 # FIXME CoP
        if (partition_object["bins"][0] == -np.inf) or (partition_object["bins"][-1] == np.inf):
            raise ValueError("Partition endpoints must be finite.")  # noqa: TRY003 # FIXME CoP
        if "tail_weights" in partition_object and np.sum(partition_object["tail_weights"]) > 0:
            raise ValueError("Partition cannot have tail weights -- endpoints must be finite.")  # noqa: TRY003 # FIXME CoP

        # Step CDF of the expected distribution, linearly interpolated below.
        test_cdf = np.append(np.array([0]), np.cumsum(partition_object["weights"]))

        def estimated_cdf(x):
            return np.interp(x, partition_object["bins"], test_cdf)

        if bootstrap_samples is None:
            bootstrap_samples = 1000
        if bootstrap_sample_size is None:
            # Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've  # noqa: E501 # FIXME CoP
            # compressed via a partition.
            # Sampling too few elements will make the test insensitive to significant differences, especially  # noqa: E501 # FIXME CoP
            # for nonoverlapping ranges.
            bootstrap_sample_size = len(partition_object["weights"]) * 2

        # One KS p-value per bootstrap sample.
        results = [
            stats.kstest(
                NP_RANDOM_GENERATOR.choice(column, size=bootstrap_sample_size),
                estimated_cdf,
            )[1]
            for _ in range(bootstrap_samples)
        ]

        # Laplace-smoothed fraction of samples that pass at significance `p`.
        test_result = (1 + sum(x >= p for x in results)) / (bootstrap_samples + 1)

        hist, _bin_edges = np.histogram(column, partition_object["bins"])
        below_partition = len(np.where(column < partition_object["bins"][0])[0])
        above_partition = len(np.where(column > partition_object["bins"][-1])[0])

        # Expand observed partition to report, if necessary
        if below_partition > 0 and above_partition > 0:
            observed_bins = [np.min(column)] + partition_object["bins"] + [np.max(column)]
            observed_weights = np.concatenate(([below_partition], hist, [above_partition])) / len(
                column
            )
        elif below_partition > 0:
            observed_bins = [np.min(column)] + partition_object["bins"]
            observed_weights = np.concatenate(([below_partition], hist)) / len(column)
        elif above_partition > 0:
            observed_bins = partition_object["bins"] + [np.max(column)]
            observed_weights = np.concatenate((hist, [above_partition])) / len(column)
        else:
            observed_bins = partition_object["bins"]
            observed_weights = hist / len(column)

        observed_cdf_values = np.cumsum(observed_weights)

        # TODO: How should this metric's return_obj be structured?
        return_obj = {
            "observed_value": test_result,
            "details": {
                "bootstrap_samples": bootstrap_samples,
                "bootstrap_sample_size": bootstrap_sample_size,
                "observed_partition": {
                    "bins": observed_bins,
                    "weights": observed_weights.tolist(),
                },
                "expected_partition": {
                    "bins": partition_object["bins"],
                    "weights": partition_object["weights"],
                },
                "observed_cdf": {
                    "x": observed_bins,
                    "cdf_values": [0] + observed_cdf_values.tolist(),
                },
                "expected_cdf": {
                    "x": partition_object["bins"],
                    "cdf_values": test_cdf.tolist(),
                },
            },
        }

        return return_obj
python | huggingface__transformers | src/transformers/models/aimv2/modular_aimv2.py | {
"start": 15621,
"end": 16073
class ____(SiglipAttention):
    """Attention layer for AIMv2 whose projection biases follow ``config.qkv_bias``.

    Rebuilds the q/k/v/output projections after the base initializer so that the
    bias flag is taken from the config (presumably overriding the base class's
    defaults -- confirm against ``SiglipAttention``).
    """

    def __init__(self, config):
        super().__init__(config)
        use_bias = config.qkv_bias
        # Creation order is preserved deliberately: parameter initialization
        # consumes the global RNG, so reordering would change initial weights.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=use_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=use_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=use_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=use_bias)
| Aimv2Attention |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/TC008.py | {
"start": 1136,
"end": 1617
class ____:
    a: TypeAlias = 'Baz'  # OK
    type A = 'Baz'  # TC008

    class Nested:
        a: TypeAlias = 'Baz'  # OK
        type A = 'Baz'  # TC008


# O should have parenthesis added
o: TypeAlias = """int
| None"""
type O = """int
| None"""

# P, Q, and R should not have parenthesis added
p: TypeAlias = ("""int
| None""")
type P = ("""int
| None""")
q: TypeAlias = """(int
| None)"""
type Q = """(int
| None)"""
r: TypeAlias = """int | None"""
type R = """int | None"""
# NOTE(review): this is a ruff lint fixture (rule TC008); the `# OK`/`# TC008`
# markers and exact source positions feed snapshot tests, so the content above
# must not be reflowed or edited.
python | redis__redis-py | tests/test_asyncio/test_cluster.py | {
"start": 123542,
"end": 130416
class ____:
    """
    Tests for SSL connections.

    This relies on the --redis-ssl-url for building the client and connecting to the
    appropriate port.
    """

    @pytest_asyncio.fixture()
    def create_client(self, request: FixtureRequest) -> Callable[..., RedisCluster]:
        # Split "host:port" out of the configured SSL URL for the cluster node.
        ssl_url = request.config.option.redis_ssl_url
        ssl_host, ssl_port = urlparse(ssl_url)[1].split(":")

        self.client_cert, self.client_key, self.ca_cert = get_tls_certificates(
            "cluster"
        )

        async def _create_client(mocked: bool = True, **kwargs: Any) -> RedisCluster:
            if mocked:
                # Mock cluster bootstrap commands so a single SSL node answers
                # INFO / CLUSTER SLOTS / COMMAND during client initialization.
                with mock.patch.object(
                    ClusterNode, "execute_command", autospec=True
                ) as execute_command_mock:

                    async def execute_command(self, *args, **kwargs):
                        if args[0] == "INFO":
                            return {"cluster_enabled": True}
                        if args[0] == "CLUSTER SLOTS":
                            return [[0, 16383, [ssl_host, ssl_port, "ssl_node"]]]
                        if args[0] == "COMMAND":
                            return {
                                "ping": {
                                    "name": "ping",
                                    "arity": -1,
                                    "flags": ["stale", "fast"],
                                    "first_key_pos": 0,
                                    "last_key_pos": 0,
                                    "step_count": 0,
                                }
                            }
                        raise NotImplementedError()

                    execute_command_mock.side_effect = execute_command

                    rc = await RedisCluster(host=ssl_host, port=ssl_port, **kwargs)
                    assert len(rc.get_nodes()) == 1
                    node = rc.get_default_node()
                    assert node.port == int(ssl_port)
                    return rc

            # Unmocked path: actually connect to the SSL endpoint.
            return await RedisCluster(host=ssl_host, port=ssl_port, **kwargs)

        return _create_client

    async def test_ssl_connection_without_ssl(
        self, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        # A plaintext connection to the TLS port is closed by the server.
        with pytest.raises(RedisClusterException) as e:
            await create_client(mocked=False, ssl=False)
        e = e.value.__cause__
        assert "Connection closed by server" in str(e)

    async def test_ssl_with_invalid_cert(
        self, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        # Default cert verification rejects the server's self-signed cert.
        with pytest.raises(RedisClusterException) as e:
            await create_client(mocked=False, ssl=True)
        e = e.value.__cause__.__context__
        assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)

    async def test_ssl_connection(
        self, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        async with await create_client(ssl=True, ssl_cert_reqs="none") as rc:
            assert await rc.ping()

    @pytest.mark.parametrize(
        "ssl_ciphers",
        [
            "AES256-SHA:DHE-RSA-AES256-SHA:AES128-SHA:DHE-RSA-AES128-SHA",
            "ECDHE-ECDSA-AES256-GCM-SHA384",
            "ECDHE-RSA-AES128-GCM-SHA256",
        ],
    )
    async def test_ssl_connection_tls12_custom_ciphers(
        self, ssl_ciphers, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        async with await create_client(
            ssl=True,
            ssl_cert_reqs="none",
            ssl_min_version=ssl.TLSVersion.TLSv1_2,
            ssl_ciphers=ssl_ciphers,
        ) as rc:
            assert await rc.ping()

    async def test_ssl_connection_tls12_custom_ciphers_invalid(
        self, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        # A nonsense cipher string must prevent the cluster from connecting.
        async with await create_client(
            ssl=True,
            ssl_cert_reqs="none",
            ssl_min_version=ssl.TLSVersion.TLSv1_2,
            ssl_ciphers="foo:bar",
        ) as rc:
            with pytest.raises(RedisClusterException) as e:
                assert await rc.ping()
            assert "Redis Cluster cannot be connected" in str(e.value)

    @pytest.mark.parametrize(
        "ssl_ciphers",
        [
            "TLS_CHACHA20_POLY1305_SHA256",
            "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256",
        ],
    )
    async def test_ssl_connection_tls13_custom_ciphers(
        self, ssl_ciphers, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        # TLSv1.3 does not support changing the ciphers
        async with await create_client(
            ssl=True,
            ssl_cert_reqs="none",
            ssl_min_version=ssl.TLSVersion.TLSv1_2,
            ssl_ciphers=ssl_ciphers,
        ) as rc:
            with pytest.raises(RedisClusterException) as e:
                assert await rc.ping()
            assert "Redis Cluster cannot be connected" in str(e.value)

    async def test_validating_self_signed_certificate(
        self, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        # ssl_check_hostname=False is used to avoid hostname verification
        # in the test environment, where the server certificate is self-signed
        # and does not match the hostname that is extracted for the cluster.
        # Cert hostname is 'localhost' in the cluster initialization when using
        # 'localhost' it gets transformed into 127.0.0.1
        # In production code, ssl_check_hostname should be set to True
        # to ensure proper hostname verification.
        async with await create_client(
            ssl=True,
            ssl_ca_certs=self.ca_cert,
            ssl_cert_reqs="required",
            ssl_certfile=self.client_cert,
            ssl_keyfile=self.client_key,
            ssl_check_hostname=False,
        ) as rc:
            assert await rc.ping()

    async def test_validating_self_signed_string_certificate(
        self, create_client: Callable[..., Awaitable[RedisCluster]]
    ) -> None:
        # Same as above, but the CA cert is passed as an in-memory string.
        with open(self.ca_cert) as f:
            cert_data = f.read()

        # ssl_check_hostname=False is used to avoid hostname verification
        # in the test environment, where the server certificate is self-signed
        # and does not match the hostname that is extracted for the cluster.
        # Cert hostname is 'localhost' in the cluster initialization when using
        # 'localhost' it gets transformed into 127.0.0.1
        # In production code, ssl_check_hostname should be set to True
        # to ensure proper hostname verification.
        async with await create_client(
            ssl=True,
            ssl_ca_data=cert_data,
            ssl_cert_reqs="required",
            ssl_check_hostname=False,
            ssl_certfile=self.client_cert,
            ssl_keyfile=self.client_key,
        ) as rc:
            assert await rc.ping()
| TestSSL |
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 9164,
"end": 9420
} | class ____(TypedDict, total=False):
path: Required[str]
"""Path of the file to delete relative to the workspace root."""
type: Required[Literal["delete_file"]]
"""The operation type. Always `delete_file`."""
| ApplyPatchCallOperationDeleteFile |
python | getsentry__sentry | tests/sentry/web/frontend/test_vsts_extension_configuration.py | {
"start": 238,
"end": 3318
class ____(TestCase):
    """Tests for the VSTS extension configuration endpoint.

    Covers the login/redirect matrix (one org, many orgs, logged out) and
    parameter validation for ``targetId`` / ``targetName``.
    """

    @property
    def path(self) -> str:
        # URL of the vsts-extension-configuration view under test.
        return reverse("vsts-extension-configuration")

    def setUp(self) -> None:
        self.user = self.create_user()
        self.org = self.create_organization()
        self.create_member(user_id=self.user.id, organization=self.org, role="admin")

    def test_logged_in_one_org(self) -> None:
        self.login_as(self.user)

        resp = self.client.get(self.path, {"targetId": "1", "targetName": "foo"})

        # Goes straight to VSTS OAuth
        assert resp.status_code == 302
        assert resp.headers["Location"].startswith(
            "https://app.vssps.visualstudio.com/oauth2/authorize"
        )

    def test_logged_in_many_orgs(self) -> None:
        self.login_as(self.user)

        # A second membership forces the org-selection page instead of OAuth.
        org = self.create_organization()
        self.create_member(user_id=self.user.id, organization=org)

        resp = self.client.get(self.path, {"targetId": "1", "targetName": "foo"})

        assert resp.status_code == 302
        assert "/extensions/vsts/link/" in resp.headers["Location"]

    def test_choose_org(self) -> None:
        self.login_as(self.user)

        # An explicit orgSlug skips the selection page and goes to OAuth.
        resp = self.client.get(
            self.path, {"targetId": "1", "targetName": "foo", "orgSlug": self.org.slug}
        )

        assert resp.status_code == 302
        assert resp.headers["Location"].startswith(
            "https://app.vssps.visualstudio.com/oauth2/authorize"
        )

    def test_logged_out(self) -> None:
        query = {"targetId": "1", "targetName": "foo"}
        resp = self.client.get(self.path, query)

        assert resp.status_code == 302
        assert "/auth/login/" in resp.headers["Location"]

        # Verify URL encoded post-login redirect URL
        next_parts = urlparse(dict(parse_qsl(urlparse(resp.headers["Location"]).query))["next"])
        assert next_parts.path == "/extensions/vsts/configure/"
        assert dict(parse_qsl(next_parts.query)) == query

    @override_settings(SENTRY_FEATURES={})
    def test_goes_to_setup_unregisted_feature(self) -> None:
        self.login_as(self.user)
        resp = self.client.get(self.path, {"targetId": "1", "targetName": "foo"})

        assert resp.status_code == 302
        assert resp.headers["Location"].startswith(
            "https://app.vssps.visualstudio.com/oauth2/authorize"
        )

    def test_missing_parameters(self) -> None:
        self.login_as(self.user)
        resp = self.client.get(self.path, {"targetId": "1"})
        assert resp.status_code == 200
        assert b"Missing required targetName parameter" in resp.content

        resp = self.client.get(self.path, {"targetName": "foo"})
        assert resp.status_code == 200
        assert b"Missing required targetId parameter" in resp.content

    def test_invalid_account_name(self) -> None:
        self.login_as(self.user)
        resp = self.client.get(self.path, {"targetId": "1", "targetName": "example.com/"})
        assert resp.status_code == 200
        assert b"Invalid targetName parameter" in resp.content
python | cython__cython | Demos/benchmarks/bm_comprehensions.py | {
"start": 178,
"end": 241
} | class ____(Enum):
BIG = 1
SMALL = 2
@dataclass
| WidgetKind |
python | pytorch__pytorch | torch/_inductor/mkldnn_ir.py | {
"start": 26776,
"end": 28063
class ____(ExternKernelAlloc):
    """Inductor IR node for ``torch.ops.mkl._mkl_linear`` (MKL pre-packed linear).

    Wraps the extern kernel allocation so codegen emits a call to the MKL
    packed-linear op through the AOTI CPU shim.
    """

    def __init__(
        self,
        layout,
        inputs,
        constant_args=(),
    ) -> None:
        super().__init__(
            layout,
            inputs,
            constant_args,
            None,
            op_overload=torch.ops.mkl._mkl_linear.default,
        )

    def codegen(self, wrapper):
        # The CPU shim header declares the C entry point this op lowers to.
        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
        super().codegen(wrapper)

    @classmethod
    def create(cls, x, packed_w, orig_w, B, batch_size):
        # Build an MKLPackedLinear node from input IR values.
        # packed_w is the MKL-prepacked weight and B the optional bias -- assumed
        # from usage; confirm against the _mkl_linear op schema.
        x = cls.require_stride1(cls.realize_input(x))
        orig_w = cls.require_stride1(cls.realize_input(orig_w))
        *m, _ = x.get_size()
        oc, _ = orig_w.get_size()
        # Output keeps x's leading dims and replaces the last dim with the
        # weight's output-channel count.
        output_size = list(m) + [oc]
        output_stride = FlexibleLayout.contiguous_strides(output_size)
        inputs = [x, packed_w, orig_w]
        constant_args = [batch_size]
        if B is not None:
            inputs += [B]
        else:
            # Keep the argument positions stable when there is no bias.
            constant_args.insert(0, None)

        device = x.get_device()
        assert device is not None
        return MKLPackedLinear(
            layout=FixedLayout(device, x.get_dtype(), output_size, output_stride),
            inputs=inputs,
            constant_args=constant_args,
        )
| MKLPackedLinear |
python | pytorch__pytorch | test/dynamo/test_guard_serialization.py | {
"start": 1272,
"end": 1639
} | class ____(torch.nn.Module):
def __init__(self, submodule=None):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.param = torch.nn.Parameter(torch.randn(3, 2))
self.nested = submodule or GlobalModule()
def forward(self, x):
return self.linear(x) + 1
def global_func(x):
return x + 1
| GlobalNestedModule |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/descriptor_props.py | {
"start": 34541,
"end": 40846
} | class ____(DescriptorProperty[_T]):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
:class:`.Synonym` is constructed using the :func:`_orm.synonym`
function.
.. seealso::
:ref:`synonyms` - Overview of synonyms
"""
comparator_factory: Optional[Type[PropComparator[_T]]]
def __init__(
self,
name: str,
map_column: Optional[bool] = None,
descriptor: Optional[Any] = None,
comparator_factory: Optional[Type[PropComparator[_T]]] = None,
attribute_options: Optional[_AttributeOptions] = None,
info: Optional[_InfoType] = None,
doc: Optional[str] = None,
):
super().__init__(attribute_options=attribute_options)
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
if doc:
self.doc = doc
elif descriptor and descriptor.__doc__:
self.doc = descriptor.__doc__
else:
self.doc = None
if info:
self.info.update(info)
util.set_creation_order(self)
if not TYPE_CHECKING:
@property
def uses_objects(self) -> bool:
return getattr(self.parent.class_, self.name).impl.uses_objects
# TODO: when initialized, check _proxied_object,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_object(
self,
) -> Union[MapperProperty[_T], SQLORMOperations[_T]]:
attr = getattr(self.parent.class_, self.name)
if not hasattr(attr, "property") or not isinstance(
attr.property, MapperProperty
):
# attribute is a non-MapperProprerty proxy such as
# hybrid or association proxy
if isinstance(attr, attributes.QueryableAttribute):
return attr.comparator
elif isinstance(attr, SQLORMOperations):
# assocaition proxy comes here
return attr
raise sa_exc.InvalidRequestError(
"""synonym() attribute "%s.%s" only supports """
"""ORM mapped attributes, got %r"""
% (self.parent.class_.__name__, self.name, attr)
)
return attr.property
def _column_strategy_attrs(self) -> Sequence[QueryableAttribute[Any]]:
return (getattr(self.parent.class_, self.name),)
def _comparator_factory(self, mapper: Mapper[Any]) -> SQLORMOperations[_T]:
prop = self._proxied_object
if isinstance(prop, MapperProperty):
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
else:
return prop
def get_history(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
passive: PassiveFlag = PassiveFlag.PASSIVE_OFF,
) -> History:
attr: QueryableAttribute[Any] = getattr(self.parent.class_, self.name)
return attr.impl.get_history(state, dict_, passive=passive)
def _get_dataclass_setup_options(
self,
decl_scan: _ClassScanAbstractConfig,
key: str,
dataclass_setup_arguments: _DataclassArguments,
enable_descriptor_defaults: bool,
) -> _AttributeOptions:
dataclasses_default = self._attribute_options.dataclasses_default
if (
dataclasses_default is not _NoArg.NO_ARG
and not callable(dataclasses_default)
and enable_descriptor_defaults
and not getattr(
decl_scan.cls, "_sa_disable_descriptor_defaults", False
)
):
proxied = decl_scan.collected_attributes[self.name]
proxied_default = proxied._attribute_options.dataclasses_default
if proxied_default != dataclasses_default:
raise sa_exc.ArgumentError(
f"Synonym {key!r} default argument "
f"{dataclasses_default!r} must match the dataclasses "
f"default value of proxied object {self.name!r}, "
f"""currently {
repr(proxied_default)
if proxied_default is not _NoArg.NO_ARG
else 'not set'}"""
)
self._default_scalar_value = dataclasses_default
return self._attribute_options._replace(
dataclasses_default=DONT_SET
)
return self._attribute_options
@util.preload_module("sqlalchemy.orm.properties")
def set_parent(self, parent: Mapper[Any], init: bool) -> None:
properties = util.preloaded.orm_properties
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.persist_selectable.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (
self.name,
parent.persist_selectable.description,
self.key,
)
)
elif (
parent.persist_selectable.c[self.key]
in parent._columntoproperty
and parent._columntoproperty[
parent.persist_selectable.c[self.key]
].key
== self.name
):
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r"
% (self.key, self.name, self.name, self.key)
)
p: ColumnProperty[Any] = properties.ColumnProperty(
parent.persist_selectable.c[self.key]
)
parent._configure_property(self.name, p, init=init, setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
| SynonymProperty |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 93293,
"end": 94934
} | class ____(Response):
"""
Response of datasets.delete endpoint.
:param deleted:
:type deleted: bool
:param deleted_versions:
:type deleted_versions: Sequence[str]
"""
_service = "datasets"
_action = "delete"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"deleted": {"type": ["boolean", "null"]},
"deleted_versions": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, deleted=None, deleted_versions=None, **kwargs):
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
self.deleted_versions = deleted_versions
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
self.assert_isinstance(value, "deleted", (bool,))
self._property_deleted = value
@schema_property("deleted_versions")
def deleted_versions(self):
return self._property_deleted_versions
@deleted_versions.setter
def deleted_versions(self, value):
if value is None:
self._property_deleted_versions = None
return
self.assert_isinstance(value, "deleted_versions", (list, tuple))
self.assert_isinstance(
value, "deleted_versions", six.string_types, is_array=True
)
self._property_deleted_versions = value
| DeleteResponse |
python | doocs__leetcode | solution/0600-0699/0694.Number of Distinct Islands/Solution.py | {
"start": 0,
"end": 748
} | class ____:
def numDistinctIslands(self, grid: List[List[int]]) -> int:
def dfs(i: int, j: int, k: int):
grid[i][j] = 0
path.append(str(k))
dirs = (-1, 0, 1, 0, -1)
for h in range(1, 5):
x, y = i + dirs[h - 1], j + dirs[h]
if 0 <= x < m and 0 <= y < n and grid[x][y]:
dfs(x, y, h)
path.append(str(-k))
paths = set()
path = []
m, n = len(grid), len(grid[0])
for i, row in enumerate(grid):
for j, x in enumerate(row):
if x:
dfs(i, j, 0)
paths.add("".join(path))
path.clear()
return len(paths)
| Solution |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 71325,
"end": 72154
} | class ____(Field[_ContantT]):
"""A field that (de)serializes to a preset constant. If you only want the
constant added for serialization or deserialization, you should use
``dump_only=True`` or ``load_only=True`` respectively.
:param constant: The constant to return for the field attribute.
"""
_CHECK_ATTRIBUTE = False
def __init__(self, constant: _ContantT, **kwargs: Unpack[_BaseFieldKwargs]):
super().__init__(**kwargs)
self.constant = constant
self.load_default = constant
self.dump_default = constant
def _serialize(self, value, *args, **kwargs) -> _ContantT:
return self.constant
def _deserialize(self, value, *args, **kwargs) -> _ContantT:
return self.constant
# Aliases
URL = Url
Str = String
Bool = Boolean
Int = Integer
| Constant |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_np_array_ops_test.py | {
"start": 2749,
"end": 19008
} | class ____(test.TestCase):
def setUp(self):
super(ArrayCreationTest, self).setUp()
set_up_virtual_devices()
python_shapes = [
0, 1, 2, (), (1,), (2,), (1, 2, 3), [], [1], [2], [1, 2, 3]
]
self.shape_transforms = [
lambda x: x, lambda x: np.array(x, dtype=int),
lambda x: np_array_ops.array(x, dtype=int), tensor_shape.TensorShape
]
self.all_shapes = []
for fn in self.shape_transforms:
self.all_shapes.extend([fn(s) for s in python_shapes])
if sys.version_info.major == 3:
# There is a bug of np.empty (and alike) in Python 3 causing a crash when
# the `shape` argument is an np_arrays.ndarray scalar (or tf.Tensor
# scalar).
def not_ndarray_scalar(s):
return not (isinstance(s, np_arrays.ndarray) and s.ndim == 0)
self.all_shapes = list(filter(not_ndarray_scalar, self.all_shapes))
source_array_data = [
1,
5.5,
7,
(),
(8, 10.),
((), ()),
((1, 4), (2, 8)),
[],
[7],
[8, 10.],
[[], []],
[[1, 4], [2, 8]],
([], []),
([1, 4], [2, 8]),
[(), ()],
[(1, 4), (2, 8)],
]
self.array_transforms = [
lambda x: x,
np_array_ops.array,
_get_weak_tensor,
]
self.all_arrays = []
for fn in self.array_transforms:
self.all_arrays.extend([fn(s) for s in source_array_data])
def testEmptyLikeOnWeakInputs(self):
for a in self.all_arrays:
expected = np.empty_like(a)
actual = np_array_ops.empty_like(a)
msg = 'array: {}'.format(a)
self.match_shape(actual, expected, msg)
self.match_dtype_and_type(
actual,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg,
)
for a, t in itertools.product(self.all_arrays, _all_types):
actual = np_array_ops.empty_like(a, t)
expected = np.empty_like(a, t)
msg = 'array: {} type: {}'.format(a, t)
self.match_shape(actual, expected, msg)
# empty_like returns a Tensor if dtype is specified.
self.match_dtype_and_type(actual, expected.dtype, tensor.Tensor, msg)
def testZerosLikeOnWeakInputs(self):
for a in self.all_arrays:
actual = np_array_ops.zeros_like(a)
expected = np.zeros_like(a)
msg = 'array: {}'.format(a)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg,
)
for a, t in itertools.product(self.all_arrays, _all_types):
actual = np_array_ops.zeros_like(a, t)
expected = np.zeros_like(a, t)
msg = 'array: {} type: {}'.format(a, t)
self.match_expected_attrs(
actual, expected, expected.dtype, tensor.Tensor, msg
)
def testOnes(self):
for s in self.all_shapes:
actual = np_array_ops.ones(s)
expected = np.ones(s)
msg = 'shape: {}'.format(s)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
tensor.Tensor,
msg,
)
for s, t in itertools.product(self.all_shapes, _all_types):
actual = np_array_ops.ones(s, t)
expected = np.ones(s, t)
msg = 'shape: {}, dtype: {}'.format(s, t)
self.match_expected_attrs(
actual, expected, expected.dtype, tensor.Tensor, msg
)
def testOnesLike(self):
for a in self.all_arrays:
actual = np_array_ops.ones_like(a)
expected = np.ones_like(a)
msg = 'array: {}'.format(a)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg,
)
for a, t in itertools.product(self.all_arrays, _all_types):
actual = np_array_ops.ones_like(a, t)
expected = np.ones_like(a, t)
msg = 'array: {} type: {}'.format(a, t)
self.match_expected_attrs(
actual, expected, expected.dtype, tensor.Tensor, msg
)
def testFullLike(self):
# List of 2-tuples of fill value and shape.
data = [
(5, ()),
(5, (7,)),
(5., (7,)),
([5, 8], (2,)),
([5, 8], (3, 2)),
([[5], [8]], (2, 3)),
([[5], [8]], (3, 2, 5)),
([[5.], [8.]], (3, 2, 5)),
]
zeros_builders = [np_array_ops.zeros, np.zeros]
for f, s in data:
for fn1, fn2, arr_dtype in itertools.product(
self.array_transforms, zeros_builders, _all_types
):
fill_value = fn1(f)
arr = fn2(s, arr_dtype)
wt_arr = _get_weak_tensor(arr)
expected = np.full_like(arr, fill_value)
self.match_expected_attrs(
np_array_ops.full_like(wt_arr, fill_value),
expected,
expected.dtype,
WeakTensor,
)
for dtype in _all_types:
self.match_expected_attrs(
np_array_ops.full_like(arr, fill_value, dtype=dtype),
np.full_like(arr, fill_value, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
)
def testArray(self):
ndmins = [0, 1, 2, 5]
for a, dtype, ndmin, copy in itertools.product(
self.all_arrays, _all_types, ndmins, [True, False]
):
# Dtype specified.
self.match_expected_attrs(
np_array_ops.array(a, dtype=dtype, ndmin=ndmin, copy=copy),
np.array(a, dtype=dtype, ndmin=ndmin),
dtype,
tensor.Tensor,
)
# No dtype specified.
actual = np_array_ops.array(a, ndmin=ndmin, copy=copy)
expected = np.array(a, ndmin=ndmin)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
def testAsArray(self):
for a, dtype in itertools.product(self.all_arrays, _all_types):
# Dtype specified.
self.match_expected_attrs(
np_array_ops.asarray(a, dtype=dtype),
np.asarray(a, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
)
# No dtype specified.
actual = np_array_ops.asarray(a)
expected = np.asarray(a)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
zeros_list = np_array_ops.zeros(5)
# Same instance is returned if no dtype is specified and input is ndarray.
self.assertIs(np_array_ops.asarray(zeros_list), zeros_list)
with ops.device('CPU:1'):
self.assertIs(np_array_ops.asarray(zeros_list), zeros_list)
# Different instance is returned if dtype is specified and input is ndarray.
self.assertIsNot(np_array_ops.asarray(zeros_list, dtype=int), zeros_list)
def testAsAnyArray(self):
for a, dtype in itertools.product(self.all_arrays, _all_types):
# Dtype specified.
self.match_expected_attrs(
np_array_ops.asanyarray(a, dtype=dtype),
np.asanyarray(a, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
)
# No dtype specified.
actual = np_array_ops.asanyarray(a)
expected = np.asanyarray(a)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
zeros_list = np_array_ops.zeros(5)
# Same instance is returned if no dtype is specified and input is ndarray.
self.assertIs(np_array_ops.asanyarray(zeros_list), zeros_list)
with ops.device('CPU:1'):
self.assertIs(np_array_ops.asanyarray(zeros_list), zeros_list)
# Different instance is returned if dtype is specified and input is ndarray.
self.assertIsNot(np_array_ops.asanyarray(zeros_list, dtype=int), zeros_list)
def testAsContiguousArray(self):
for a, dtype in itertools.product(self.all_arrays, _all_types):
# Dtype specified.
self.match_expected_attrs(
np_array_ops.ascontiguousarray(a, dtype=dtype),
np.ascontiguousarray(a, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
)
# No dtype specified.
actual = np_array_ops.ascontiguousarray(a)
expected = np.ascontiguousarray(a)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
)
def testARange(self):
int_values = np.arange(-3, 3).tolist()
float_values = np.arange(-3.5, 3.5).tolist()
all_values = int_values + float_values
for dtype in _all_types:
for start in all_values:
msg = 'dtype:{} start:{}'.format(dtype, start)
# Dtype specified.
self.match_expected_attrs(
np_array_ops.arange(start, dtype=dtype),
np.arange(start, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
msg=msg,
)
# No dtype specified.
actual = np_array_ops.arange(start)
expected = np.arange(start)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg=msg,
)
for stop in all_values:
msg = 'dtype:{} start:{} stop:{}'.format(dtype, start, stop)
# TODO(srbs): Investigate and remove check.
# There are some bugs when start or stop is float and dtype is int.
if not isinstance(start, float) and not isinstance(stop, float):
# Dtype specfieid.
self.match_expected_attrs(
np_array_ops.arange(start, stop, dtype=dtype),
np.arange(start, stop, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
msg=msg,
)
# No dtype specified.
actual = np_array_ops.arange(start, stop)
expected = np.arange(start, stop)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg=msg,
)
# Note: We intentionally do not test with float values for step
# because numpy.arange itself returns inconsistent results. e.g.
# np.arange(0.5, 3, step=0.5, dtype=int) returns
# array([0, 1, 2, 3, 4])
for step in int_values:
msg = 'dtype:{} start:{} stop:{} step:{}'.format(
dtype, start, stop, step)
if not step:
with self.assertRaises(ValueError):
actual = np_array_ops.arange(start, stop, step)
expected = np.arange(start, stop, step)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg=msg,
)
if not isinstance(start, float) and not isinstance(stop, float):
self.match_expected_attrs(
np_array_ops.arange(start, stop, step, dtype=dtype),
np.arange(start, stop, step, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
msg=msg,
)
else:
if not isinstance(start, float) and not isinstance(stop, float):
actual = np_array_ops.arange(start, stop, step)
expected = np.arange(start, stop, step)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg=msg,
)
self.match_expected_attrs(
np_array_ops.arange(start, stop, step, dtype=dtype),
np.arange(start, stop, step, dtype=dtype),
_NP_TO_TF[dtype],
tensor.Tensor,
msg=msg,
)
def testDiag(self):
array_transforms = [
lambda x: x, # Identity,
_get_weak_tensor,
np_array_ops.array,
]
def run_test(arr):
for fn in array_transforms:
arr = fn(arr)
actual = np_array_ops.diag(arr)
expected = np.diag(arr)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='diag({})'.format(arr),
)
for k in range(-3, 3):
actual = np_array_ops.diag(arr, k)
expected = np.diag(arr, k)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='diag({}, k={})'.format(arr, k),
)
# 2-d arrays.
run_test(np.arange(9).reshape((3, 3)).tolist())
run_test(np.arange(6).reshape((2, 3)).tolist())
run_test(np.arange(6).reshape((3, 2)).tolist())
run_test(np.arange(3).reshape((1, 3)).tolist())
run_test(np.arange(3).reshape((3, 1)).tolist())
run_test([[5]])
run_test([[]])
run_test([[], []])
# 1-d arrays.
run_test([])
run_test([1])
run_test([1, 2])
def testDiagFlat(self):
array_transforms = [
lambda x: x, # Identity,
_get_weak_tensor,
np_array_ops.array,
]
def run_test(arr):
for fn in array_transforms:
arr = fn(arr)
actual = np_array_ops.diagflat(arr)
expected = np.diagflat(arr)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='diagflat({})'.format(arr),
)
for k in range(-3, 3):
actual = np_array_ops.diagflat(arr, k)
expected = np.diagflat(arr, k)
self.match_expected_attrs(
actual,
expected,
_NP_to_TF_result_inferred_types[expected.dtype],
WeakTensor,
msg='diagflat({})'.format(arr),
)
# 1-d arrays.
run_test([])
run_test([1])
run_test([1, 2])
# 2-d arrays.
run_test([[]])
run_test([[5]])
run_test([[], []])
run_test(np.arange(4).reshape((2, 2)).tolist())
run_test(np.arange(2).reshape((2, 1)).tolist())
run_test(np.arange(2).reshape((1, 2)).tolist())
# 3-d arrays
run_test(np.arange(8).reshape((2, 2, 2)).tolist())
def match_shape(self, actual, expected, msg=None):
if msg:
msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
msg, expected.shape, actual.shape)
self.assertEqual(actual.shape, expected.shape, msg=msg)
def match_dtype_and_type(self, actual, expected_dtype, res_type, msg=None):
if msg:
msg = (
'Dtype and type match failed for: {}. Expected dtype: {} Actual'
' dtype: {}. Expected type: {} Actual type: {}.'.format(
msg, expected_dtype, actual.dtype, res_type, type(actual)
)
)
self.assertIsInstance(actual, res_type)
self.assertEqual(actual.dtype, expected_dtype, msg=msg)
def match_expected_attrs(
self,
actual,
expected,
expected_dtype,
res_type,
msg=None,
almost=False,
decimal=7,
):
msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
if msg:
msg = '{} {}'.format(msg_, msg)
else:
msg = msg_
self.assertIsInstance(actual, res_type)
self.match_dtype_and_type(actual, expected_dtype, res_type, msg)
self.match_shape(actual, expected, msg)
if not almost:
if not actual.shape.rank:
self.assertEqual(actual.tolist(), expected.tolist())
else:
self.assertSequenceEqual(actual.tolist(), expected.tolist())
else:
np.testing.assert_almost_equal(
actual.tolist(), expected.tolist(), decimal=decimal)
| ArrayCreationTest |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/private.py | {
"start": 4842,
"end": 5147
} | class ____(PrivateViewMixin, OrganizationOwnerView, ListView):
template_name = "organizations/admin/owners_edit.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["form"] = self.form_class()
return context
| EditOrganizationOwners |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.