language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | scipy/linalg/tests/test_matmul_toeplitz.py | {
"start": 217,
"end": 4088
} | class ____:
def setup_method(self):
self.rng = np.random.RandomState(42)
self.tolerance = 1.5e-13
def test_real(self):
cases = []
n = 1
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, False))
n = 2
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, False))
n = 101
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, True))
n = 1000
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, False))
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
cases.append((x, c, r, False))
n = 100
c = self.rng.normal(size=(n, 1))
r = self.rng.normal(size=(n, 1))
x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
cases.append((x, c, r, True))
n = 100
c = self.rng.normal(size=(n, 1))
r = None
x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
cases.append((x, c, r, True, -1))
n = 100
c = self.rng.normal(size=(n, 1))
r = None
x = self.rng.normal(size=n)
cases.append((x, c, r, False))
n = 101
c = self.rng.normal(size=n)
r = self.rng.normal(size=n-27)
x = self.rng.normal(size=(n-27, 1))
cases.append((x, c, r, True))
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n//4)
x = self.rng.normal(size=(n//4, self.rng.randint(1, 10)))
cases.append((x, c, r, True))
[self.do(*i) for i in cases]
def test_complex(self):
n = 127
c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
r = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
x = self.rng.normal(size=(n, 3)) + self.rng.normal(size=(n, 3))*1j
self.do(x, c, r, False)
n = 100
c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
r = self.rng.normal(size=(n//2, 1)) +\
self.rng.normal(size=(n//2, 1))*1j
x = self.rng.normal(size=(n//2, 3)) +\
self.rng.normal(size=(n//2, 3))*1j
self.do(x, c, r, False)
def test_empty(self):
c = []
r = []
x = []
self.do(x, c, r, False)
x = np.empty((0, 0))
self.do(x, c, r, False)
def test_exceptions(self):
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=2*n)
x = self.rng.normal(size=n)
assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=n-1)
assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n//2)
x = self.rng.normal(size=n//2-1)
assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
# For toeplitz matrices, matmul_toeplitz() should be equivalent to @.
def do(self, x, c, r=None, check_finite=False, workers=None):
c = np.ravel(c)
if r is None:
actual = matmul_toeplitz(c, x, check_finite, workers)
else:
r = np.ravel(r)
actual = matmul_toeplitz((c, r), x, check_finite)
desired = toeplitz(c, r) @ x
assert_allclose(actual, desired,
rtol=self.tolerance, atol=self.tolerance)
| TestMatmulToeplitz |
python | ansible__ansible | test/units/module_utils/datatag/test_datatag.py | {
"start": 8566,
"end": 11028
} | class ____:
names: t.List[str]
id_func: t.Callable[[t.Any], str] = _default_id_func
@classmethod
def get_paramdesc_from_hint(cls, annot) -> ParamDesc:
if annot:
for meta in getattr(annot, '__metadata__', []):
if isinstance(meta, ParamDesc):
return meta
return ParamDesc(names=[])
@classmethod
def get_test_param_values(
cls,
obj: t.Any,
name: str,
maybe_with_names: t.Iterable[str],
) -> tuple[t.Sequence[str], t.Iterable[t.Any], t.Callable[[object], str]]:
value = getattr(obj, name)
try:
if inspect.ismethod(value):
annot = get_type_hints(value, include_extras=True).get('return')
value = value()
else:
annot = get_type_hints(obj, include_extras=True).get(name)
except Exception as ex:
raise Exception(f"failed getting type hints for {obj!r} {name!r}") from ex
paramdesc = cls.get_paramdesc_from_hint(annot)
if not paramdesc.names:
paramdesc = dataclasses.replace(paramdesc, names=["value"])
col_count = len(paramdesc.names)
if col_count == 1:
col_count = 0 # HACK: don't require a wrapper container around single-element rows
maybe_with_names = set(maybe_with_names)
# simulate ordered set with no-values dict; the output order is not important but must be consistent per-row; use the input data order for now
matched_names = {n: None for n in paramdesc.names}
if not matched_names:
return [], [], str
out_values = []
# DTFIX-FUTURE: apply internal tagging/annotation to point at the source data row on test failure/error?
for rownum, row in enumerate(value or []):
if col_count:
# validate column count and filter the args, returning them in `matched_names` order
if len(row) != col_count:
raise ValueError(f"row {rownum} of {name!r} must contain exactly {col_count} value(s); found {len(row)}")
out_values.append([argvalue for argname, argvalue in zip(paramdesc.names, row, strict=True) if argname in matched_names])
else:
# just return the entire row as "value"
out_values.append([row])
return list(matched_names), out_values, paramdesc.id_func
| ParamDesc |
python | tensorflow__tensorflow | tensorflow/python/util/module_wrapper_test.py | {
"start": 7957,
"end": 8348
} | class ____(test.TestCase):
def testPickleSubmodule(self):
name = PickleTest.__module__ # The current module is a submodule.
module = module_wrapper.TFModuleWrapper(MockModule(name), name)
restored = pickle.loads(pickle.dumps(module))
self.assertEqual(restored.__name__, name)
self.assertIsNotNone(restored.PickleTest)
if __name__ == '__main__':
test.main()
| PickleTest |
python | spack__spack | lib/spack/spack/test/cmd/checksum.py | {
"start": 13982,
"end": 14717
} | class ____(Package):
homepage = "http://zlib.net"
url = "http://zlib.net/fossils/zlib-1.2.11.tar.gz"
version("1.2.13", sha256="abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") # FIXME
version("1.2.11", sha256="c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1")
version("1.2.8", sha256="36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d")
version("1.2.5", sha256="abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") # FIXME
version("1.2.3", sha256="1795c7d067a43174113fdf03447532f373e1c6c57c08d61d9e4e9be5e244b05e")
variant("pic", default=True, description="test")
def install(self, spec, prefix):
make("install")
"""
)
| Zlib |
python | getsentry__sentry | tests/sentry/lang/native/test_symbolicator.py | {
"start": 9623,
"end": 10481
} | class ____:
@pytest.fixture
def builtin_sources(self):
return {
"ios": {
"id": "sentry:ios",
"name": "Apple",
"type": "alias",
"sources": ["ios-source", "tvos-source"],
},
"ios-source": {
"id": "sentry:ios-source",
"name": "iOS",
"type": "gcs",
},
"tvos-source": {
"id": "sentry:tvos-source",
"name": "TvOS",
"type": "gcs",
},
}
def test_reverse_aliases(self, builtin_sources) -> None:
reverse_aliases = reverse_aliases_map(builtin_sources)
expected = {"sentry:ios-source": "sentry:ios", "sentry:tvos-source": "sentry:ios"}
assert reverse_aliases == expected
| TestAliasReversion |
python | wandb__wandb | wandb/automations/_generated/fragments.py | {
"start": 2107,
"end": 2500
} | class ____(GQLResult):
typename__: Typename[Literal["NotificationTriggeredAction"]] = (
"NotificationTriggeredAction"
)
integration: Union[
NotificationActionFieldsIntegrationIntegration, SlackIntegrationFields
] = Field(discriminator="typename__")
title: Optional[str]
message: Optional[str]
severity: Optional[AlertSeverity]
| NotificationActionFields |
python | MongoEngine__mongoengine | mongoengine/base/fields.py | {
"start": 22677,
"end": 28323
} | class ____(BaseField):
"""A geo json field storing a geojson style object."""
_geo_index = pymongo.GEOSPHERE
_type = "GeoBase"
def __init__(self, auto_index=True, *args, **kwargs):
"""
:param bool auto_index: Automatically create a '2dsphere' index.\
Defaults to `True`.
"""
self._name = "%sField" % self._type
if not auto_index:
self._geo_index = False
super().__init__(*args, **kwargs)
def validate(self, value):
"""Validate the GeoJson object based on its type."""
if isinstance(value, dict):
if set(value.keys()) == {"type", "coordinates"}:
if value["type"] != self._type:
self.error(f'{self._name} type must be "{self._type}"')
return self.validate(value["coordinates"])
else:
self.error(
"%s can only accept a valid GeoJson dictionary"
" or lists of (x, y)" % self._name
)
return
elif not isinstance(value, (list, tuple)):
self.error("%s can only accept lists of [x, y]" % self._name)
return
validate = getattr(self, "_validate_%s" % self._type.lower())
error = validate(value)
if error:
self.error(error)
def _validate_polygon(self, value, top_level=True):
if not isinstance(value, (list, tuple)):
return "Polygons must contain list of linestrings"
# Quick and dirty validator
try:
value[0][0][0]
except (TypeError, IndexError):
return "Invalid Polygon must contain at least one valid linestring"
errors = []
for val in value:
error = self._validate_linestring(val, False)
if not error and val[0] != val[-1]:
error = "LineStrings must start and end at the same point"
if error and error not in errors:
errors.append(error)
if errors:
if top_level:
return "Invalid Polygon:\n%s" % ", ".join(errors)
else:
return "%s" % ", ".join(errors)
def _validate_linestring(self, value, top_level=True):
"""Validate a linestring."""
if not isinstance(value, (list, tuple)):
return "LineStrings must contain list of coordinate pairs"
# Quick and dirty validator
try:
value[0][0]
except (TypeError, IndexError):
return "Invalid LineString must contain at least one valid point"
errors = []
for val in value:
error = self._validate_point(val)
if error and error not in errors:
errors.append(error)
if errors:
if top_level:
return "Invalid LineString:\n%s" % ", ".join(errors)
else:
return "%s" % ", ".join(errors)
def _validate_point(self, value):
"""Validate each set of coords"""
if not isinstance(value, (list, tuple)):
return "Points must be a list of coordinate pairs"
elif not len(value) == 2:
return "Value (%s) must be a two-dimensional point" % repr(value)
elif not isinstance(value[0], (float, int)) or not isinstance(
value[1], (float, int)
):
return "Both values (%s) in point must be float or int" % repr(value)
def _validate_multipoint(self, value):
if not isinstance(value, (list, tuple)):
return "MultiPoint must be a list of Point"
# Quick and dirty validator
try:
value[0][0]
except (TypeError, IndexError):
return "Invalid MultiPoint must contain at least one valid point"
errors = []
for point in value:
error = self._validate_point(point)
if error and error not in errors:
errors.append(error)
if errors:
return "%s" % ", ".join(errors)
def _validate_multilinestring(self, value, top_level=True):
if not isinstance(value, (list, tuple)):
return "MultiLineString must be a list of LineString"
# Quick and dirty validator
try:
value[0][0][0]
except (TypeError, IndexError):
return "Invalid MultiLineString must contain at least one valid linestring"
errors = []
for linestring in value:
error = self._validate_linestring(linestring, False)
if error and error not in errors:
errors.append(error)
if errors:
if top_level:
return "Invalid MultiLineString:\n%s" % ", ".join(errors)
else:
return "%s" % ", ".join(errors)
def _validate_multipolygon(self, value):
if not isinstance(value, (list, tuple)):
return "MultiPolygon must be a list of Polygon"
# Quick and dirty validator
try:
value[0][0][0][0]
except (TypeError, IndexError):
return "Invalid MultiPolygon must contain at least one valid Polygon"
errors = []
for polygon in value:
error = self._validate_polygon(polygon, False)
if error and error not in errors:
errors.append(error)
if errors:
return "Invalid MultiPolygon:\n%s" % ", ".join(errors)
def to_mongo(self, value):
if isinstance(value, dict):
return value
return SON([("type", self._type), ("coordinates", value)])
| GeoJsonBaseField |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/alert_rule.py | {
"start": 3381,
"end": 13364
} | class ____(Serializer):
"""
Serializer for returning an alert rule to the client
"""
def __init__(self, expand: list[str] | None = None, prepare_component_fields: bool = False):
self.expand = expand or []
self.prepare_component_fields = prepare_component_fields
def get_attrs(
self, item_list: Sequence[Any], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> defaultdict[AlertRule, Any]:
alert_rules = {item.id: item for item in item_list}
prefetch_related_objects(item_list, "snuba_query__environment")
result: defaultdict[AlertRule, dict[str, Any]] = defaultdict(dict)
triggers = AlertRuleTrigger.objects.filter(alert_rule__in=item_list).order_by("label")
serialized_triggers = serialize(list(triggers), **kwargs)
trigger_actions = AlertRuleTriggerAction.objects.filter(
alert_rule_trigger__alert_rule_id__in=alert_rules.keys()
).exclude(Q(sentry_app_config__isnull=True) | Q(sentry_app_id__isnull=True))
sentry_app_installations_by_sentry_app_id: Mapping[str, RpcSentryAppComponentContext] = {}
organization_ids = list({alert_rule.organization_id for alert_rule in alert_rules.values()})
if self.prepare_component_fields:
sentry_app_ids = list(trigger_actions.values_list("sentry_app_id", flat=True))
install_contexts = app_service.get_component_contexts(
filter={"app_ids": sentry_app_ids, "organization_id": organization_ids[0]},
component_type="alert-rule-action",
)
sentry_app_installations_by_sentry_app_id = {
str(context.installation.sentry_app.id): context
for context in install_contexts
if context.installation.sentry_app
}
for trigger, serialized in zip(triggers, serialized_triggers):
errors = []
alert_rule = alert_rules[trigger.alert_rule_id]
alert_rule_triggers = result[alert_rule].setdefault("triggers", [])
for action in serialized.get("actions", []):
if action is None:
continue
# Prepare AlertRuleTriggerActions that are SentryApp components
install_context = None
sentry_app_id = str(action.get("sentryAppId"))
if sentry_app_id:
install_context = sentry_app_installations_by_sentry_app_id.get(sentry_app_id)
if install_context:
rpc_install = install_context.installation
rpc_component = install_context.component
rpc_app = rpc_install.sentry_app
assert rpc_app
action["sentryAppInstallationUuid"] = rpc_install.uuid
component = (
prepare_ui_component(
rpc_install,
rpc_component,
None,
action.get("settings"),
)
if rpc_component
else None
)
if component is None:
errors.append({"detail": f"Could not fetch details from {rpc_app.name}"})
action["disabled"] = True
continue
action["formFields"] = component.app_schema.get("settings", {})
if errors:
result[alert_rule]["errors"] = errors
alert_rule_triggers.append(serialized)
alert_rule_projects = set()
for alert_rule in alert_rules.values():
if alert_rule.projects.exists():
for project in alert_rule.projects.all():
alert_rule_projects.add((alert_rule.id, project.slug))
snuba_alert_rule_projects = AlertRule.objects.filter(
id__in=[item.id for item in item_list]
).values_list("id", "projects__slug")
alert_rule_projects.update(
[(id, project_slug) for id, project_slug in snuba_alert_rule_projects if project_slug]
)
for alert_rule_id, project_slug in alert_rule_projects:
rule_result = result[alert_rules[alert_rule_id]].setdefault("projects", [])
rule_result.append(project_slug)
rule_activities = list(
AlertRuleActivity.objects.filter(
alert_rule__in=item_list, type=AlertRuleActivityType.CREATED.value
)
)
user_by_user_id: MutableMapping[int, RpcUser] = {
user.id: user
for user in user_service.get_many_by_id(
ids=[r.user_id for r in rule_activities if r.user_id is not None]
)
}
for rule_activity in rule_activities:
if rule_activity.user_id is not None:
rpc_user = user_by_user_id.get(rule_activity.user_id)
else:
rpc_user = None
if rpc_user:
created_by = dict(
id=rpc_user.id, name=rpc_user.get_display_name(), email=rpc_user.email
)
else:
created_by = None
result[alert_rules[rule_activity.alert_rule_id]]["created_by"] = created_by
for item in item_list:
if item.user_id or item.team_id:
actor = item.owner
if actor:
result[item]["owner"] = actor.identifier
if "original_alert_rule" in self.expand:
snapshot_activities = AlertRuleActivity.objects.filter(
alert_rule__in=item_list,
type=AlertRuleActivityType.SNAPSHOT.value,
)
for activity in snapshot_activities:
result[alert_rules[activity.alert_rule_id]][
"originalAlertRuleId"
] = activity.previous_alert_rule_id
if "latestIncident" in self.expand:
incident_map = {}
for incident in Incident.objects.filter(
id__in=Incident.objects.filter(alert_rule__in=alert_rules)
.values("alert_rule_id")
.annotate(incident_id=Max("id"))
.values("incident_id")
):
incident_map[incident.alert_rule_id] = serialize(incident, user=user)
for alert_rule in alert_rules.values():
result[alert_rule]["latestIncident"] = incident_map.get(alert_rule.id, None)
return result
def serialize(
self,
obj: AlertRule,
attrs: Mapping[Any, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> AlertRuleSerializerResponse:
# Mark that we're using legacy AlertRule models
report_used_legacy_models()
from sentry.incidents.endpoints.utils import translate_threshold
from sentry.incidents.logic import translate_aggregate_field
env = obj.snuba_query.environment
allow_mri = features.has(
"organizations:insights-alerts",
obj.organization,
actor=user,
)
# Temporary: Translate aggregate back here from `tags[sentry:user]` to `user` for the frontend.
aggregate = translate_aggregate_field(
obj.snuba_query.aggregate,
reverse=True,
allow_mri=allow_mri,
allow_eap=obj.snuba_query.dataset == Dataset.EventsAnalyticsPlatform.value,
)
# Apply transparency: Convert upsampled_count() back to count() for user-facing responses
# This hides the internal upsampling implementation from users
if aggregate == "upsampled_count()":
aggregate = "count()"
extrapolation_mode = obj.snuba_query.extrapolation_mode
data: AlertRuleSerializerResponse = {
"id": str(obj.id),
"name": obj.name,
"organizationId": str(obj.organization_id),
"status": obj.status,
"queryType": obj.snuba_query.type,
"dataset": obj.snuba_query.dataset,
"query": obj.snuba_query.query,
"aggregate": aggregate,
"thresholdType": obj.threshold_type,
"resolveThreshold": translate_threshold(obj, obj.resolve_threshold),
# TODO: Start having the frontend expect seconds
"timeWindow": obj.snuba_query.time_window / 60,
"environment": env.name if env else None,
# TODO: Start having the frontend expect seconds
"resolution": obj.snuba_query.resolution / 60,
"thresholdPeriod": obj.threshold_period,
"triggers": attrs.get("triggers", []),
"projects": sorted(attrs.get("projects", [])),
"owner": attrs.get("owner", None),
"originalAlertRuleId": attrs.get("originalAlertRuleId", None),
"comparisonDelta": obj.comparison_delta / 60 if obj.comparison_delta else None,
"dateModified": obj.date_modified,
"dateCreated": obj.date_added,
"createdBy": attrs.get("created_by", None),
"description": obj.description if obj.description is not None else "",
"sensitivity": obj.sensitivity,
"seasonality": obj.seasonality,
"detectionType": obj.detection_type,
}
rule_snooze = RuleSnooze.objects.filter(
Q(user_id=user.id) | Q(user_id=None), alert_rule=obj
)
if rule_snooze.exists():
data["snooze"] = True
if "latestIncident" in self.expand:
data["latestIncident"] = attrs.get("latestIncident", None)
if "errors" in attrs:
data["errors"] = attrs["errors"]
if extrapolation_mode is not None:
data["extrapolationMode"] = ExtrapolationMode(extrapolation_mode).name.lower()
return data
| AlertRuleSerializer |
python | getsentry__sentry | src/sentry/insights/endpoints/starred_segments.py | {
"start": 590,
"end": 758
} | class ____(serializers.Serializer):
segment_name = serializers.CharField(required=True)
project_id = serializers.IntegerField(required=True)
| StarSegmentSerializer |
python | apache__airflow | airflow-core/src/airflow/example_dags/example_skip_dag.py | {
"start": 1269,
"end": 2401
} | class ____(BaseOperator):
"""Empty operator which always skips the task."""
ui_color = "#e8b7e4"
def execute(self, context: Context):
raise AirflowSkipException
def create_test_pipeline(suffix, trigger_rule):
"""
Instantiate a number of operators for the given DAG.
:param str suffix: Suffix to append to the operator task_ids
:param str trigger_rule: TriggerRule for the join task
:param DAG dag_: The DAG to run the operators on
"""
skip_operator = EmptySkipOperator(task_id=f"skip_operator_{suffix}")
always_true = EmptyOperator(task_id=f"always_true_{suffix}")
join = EmptyOperator(task_id=trigger_rule, trigger_rule=trigger_rule)
final = EmptyOperator(task_id=f"final_{suffix}")
skip_operator >> join
always_true >> join
join >> final
with DAG(
dag_id="example_skip_dag",
schedule=datetime.timedelta(days=1),
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
catchup=False,
tags=["example"],
) as dag:
create_test_pipeline("1", TriggerRule.ALL_SUCCESS)
create_test_pipeline("2", TriggerRule.ONE_SUCCESS)
| EmptySkipOperator |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_dag_command.py | {
"start": 40190,
"end": 42561
} | class ____:
parser = cli_parser.get_parser()
test_bundles_config = {
"bundle1": TEST_DAGS_FOLDER / "test_example_bash_operator.py",
"bundle2": TEST_DAGS_FOLDER / "test_sensor.py",
"bundle3": TEST_DAGS_FOLDER / "test_dag_with_no_tags.py",
}
@classmethod
def setup_class(cls):
clear_db_dags()
def teardown_method(self):
clear_db_dags()
@conf_vars({("core", "load_examples"): "false"})
def test_reserialize(self, configure_dag_bundles, session):
with configure_dag_bundles(self.test_bundles_config):
dag_command.dag_reserialize(self.parser.parse_args(["dags", "reserialize"]))
serialized_dag_ids = set(session.execute(select(SerializedDagModel.dag_id)).scalars())
assert serialized_dag_ids == {"test_example_bash_operator", "test_dag_with_no_tags", "test_sensor"}
example_bash_op = session.execute(
select(DagModel).filter(DagModel.dag_id == "test_example_bash_operator")
).scalar()
assert example_bash_op.relative_fileloc == "." # the file _is_ the bundle path
assert example_bash_op.fileloc == str(TEST_DAGS_FOLDER / "test_example_bash_operator.py")
@conf_vars({("core", "load_examples"): "false"})
def test_reserialize_should_support_bundle_name_argument(self, configure_dag_bundles, session):
with configure_dag_bundles(self.test_bundles_config):
dag_command.dag_reserialize(
self.parser.parse_args(["dags", "reserialize", "--bundle-name", "bundle1"])
)
serialized_dag_ids = set(session.execute(select(SerializedDagModel.dag_id)).scalars())
assert serialized_dag_ids == {"test_example_bash_operator"}
@conf_vars({("core", "load_examples"): "false"})
def test_reserialize_should_support_multiple_bundle_name_arguments(self, configure_dag_bundles, session):
with configure_dag_bundles(self.test_bundles_config):
dag_command.dag_reserialize(
self.parser.parse_args(
["dags", "reserialize", "--bundle-name", "bundle1", "--bundle-name", "bundle2"]
)
)
serialized_dag_ids = set(session.execute(select(SerializedDagModel.dag_id)).scalars())
assert serialized_dag_ids == {"test_example_bash_operator", "test_sensor"}
| TestCliDagsReserialize |
python | eventlet__eventlet | tests/mysqldb_test.py | {
"start": 6930,
"end": 7108
} | class ____(tests.LimitedTestCase):
@tests.skip_unless(mysql_requirement)
def test_monkey_patching(self):
tests.run_isolated('mysqldb_monkey_patch.py')
| TestMonkeyPatch |
python | django__django | tests/admin_widgets/models.py | {
"start": 3661,
"end": 3821
} | class ____(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
location = models.CharField(max_length=20)
| Honeycomb |
python | rapidsai__cudf | python/cudf/cudf/core/multiindex.py | {
"start": 2334,
"end": 76304
} | class ____(Index):
"""A multi-level or hierarchical index.
Provides N-Dimensional indexing into Series and DataFrame objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes: sequence of arrays
Integers for each level designating which label at each location.
sortorder : optional int
Not yet supported
names: optional sequence of objects
Names for each of the index levels.
copy : bool, default False
Copy the levels and codes.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Not yet supported
Attributes
----------
names
nlevels
dtypes
levels
codes
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_level_values
get_loc
drop
Returns
-------
MultiIndex
Examples
--------
>>> import cudf
>>> cudf.MultiIndex(
... levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]])
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
)
"""
_levels: list[cudf.Index] | None
_codes: list[column.ColumnBase] | None
@_performance_tracking
def __init__(
self,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity=True,
nan_as_null=no_default,
) -> None:
if nan_as_null is no_default:
nan_as_null = (
False if cudf.get_option("mode.pandas_compatible") else None
)
if isinstance(levels, (pd.MultiIndex, MultiIndex)):
# TODO: Figure out why cudf.Index(pd.MultiIndex(...)) goes through here twice
# Somehow due to from_pandas calling cls?
return
if sortorder is not None:
raise NotImplementedError("sortorder is not yet supported")
if name is not None:
raise NotImplementedError(
"Use `names`, `name` is not yet supported"
)
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
elif not (is_list_like(levels) and len(levels) > 0):
raise ValueError("Must pass non-zero length sequence of levels")
elif not (is_list_like(codes) and len(codes) > 0):
raise ValueError("Must pass non-zero length sequence of codes")
elif len(codes) != len(levels):
raise ValueError(
f"levels must have the same length ({len(levels)}) "
f"as codes ({len(codes)})."
)
new_levels: list[cudf.Index] = []
for level in levels:
new_level = ensure_index(level, nan_as_null=nan_as_null)
if copy and new_level is level:
new_level = new_level.copy(deep=True)
new_levels.append(new_level)
new_codes: list[column.ColumnBase] = []
for code in codes:
if not (is_list_like(code) or is_column_like(code)):
raise TypeError("Each code must be list-like")
new_code = column.as_column(code, dtype=np.dtype(np.int64))
if copy and new_code is code:
new_code = new_code.copy(deep=True)
new_codes.append(new_code)
source_data: dict[Hashable, column.ColumnBase] = {}
for i, (code, level) in enumerate(
zip(new_codes, new_levels, strict=True)
):
if len(code):
lo, hi = code.minmax()
if lo < -1 or hi > len(level) - 1:
raise ValueError(
f"Codes must be -1 <= codes <= {len(level) - 1}"
)
if lo == -1:
# Now we can gather and insert null automatically
code[code == -1] = np.iinfo(SIZE_TYPE_DTYPE).min
result_col = level._column.take(code, nullify=True)
if (
cudf.get_option("mode.pandas_compatible")
and nan_as_null is False
and not is_dtype_obj_numeric(result_col.dtype)
and not is_pandas_nullable_extension_dtype(level.dtype)
and result_col.has_nulls(include_nan=False)
):
raise MixedTypeError(
"MultiIndex levels cannot have mixed types when `mode.pandas_compatible` is True and `nan_as_null` is False."
)
if (
cudf.get_option("mode.pandas_compatible")
and not is_dtype_obj_numeric(result_col.dtype)
and result_col.has_nulls(include_nan=False)
and nan_as_null is False
and not is_pandas_nullable_extension_dtype(level.dtype)
):
result_col = result_col.fillna(np.nan)
source_data[i] = result_col._with_type_metadata(level.dtype)
Frame.__init__(self, ColumnAccessor(source_data))
self._levels = new_levels
self._codes = new_codes
self._name = None
self.names = names
@property # type: ignore[explicit-override]
@_performance_tracking
def names(self):
return self._names
@names.setter
@_performance_tracking
def names(self, value):
if value is None:
value = [None] * self.nlevels
elif not is_list_like(value):
raise ValueError("Names should be list-like for a MultiIndex")
elif len(value) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if len(value) == len(set(value)):
# IMPORTANT: if the provided names are unique,
# we reconstruct self._data with the names as keys.
# If they are not unique, the keys of self._data
# and self._names will be different, which can lead
# to unexpected behavior in some cases. This is
# definitely buggy, but we can't disallow non-unique
# names either...
self._data = type(self._data)(
dict(zip(value, self._columns, strict=True)),
level_names=self._data.level_names,
verify=False,
)
self._names = pd.core.indexes.frozen.FrozenList(value)
@_performance_tracking
def to_series(self, index=None, name=None):
raise NotImplementedError(
"MultiIndex.to_series isn't implemented yet."
)
@_performance_tracking
def astype(self, dtype: Dtype, copy: bool = True) -> Self:
if cudf.dtype(dtype) != CUDF_STRING_DTYPE:
raise TypeError(
"Setting a MultiIndex dtype to anything other than object is "
"not supported"
)
return self
@_performance_tracking
def rename(self, names, inplace: bool = False) -> Self | None:
"""
Alter MultiIndex level names
Parameters
----------
names : list of label
Names to set, length must be the same as number of levels
inplace : bool, default False
If True, modifies objects directly, otherwise returns a new
``MultiIndex`` instance
Returns
-------
None or MultiIndex
Examples
--------
Renaming each levels of a MultiIndex to specified name:
>>> midx = cudf.MultiIndex.from_product(
... [('A', 'B'), (2020, 2021)], names=['c1', 'c2'])
>>> midx.rename(['lv1', 'lv2'])
MultiIndex([('A', 2020),
('A', 2021),
('B', 2020),
('B', 2021)],
names=['lv1', 'lv2'])
>>> midx.rename(['lv1', 'lv2'], inplace=True)
>>> midx
MultiIndex([('A', 2020),
('A', 2021),
('B', 2020),
('B', 2021)],
names=['lv1', 'lv2'])
``names`` argument must be a list, and must have same length as
``MultiIndex.levels``:
>>> midx.rename(['lv0'])
Traceback (most recent call last):
ValueError: Length of names must match number of levels in MultiIndex.
"""
return self.set_names(names, level=None, inplace=inplace)
@_performance_tracking
def set_names(
self, names, level=None, inplace: bool = False
) -> Self | None:
names_is_list_like = is_list_like(names)
level_is_list_like = is_list_like(level)
if level is not None and not level_is_list_like and names_is_list_like:
raise TypeError(
"Names must be a string when a single level is provided."
)
if not names_is_list_like and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not names_is_list_like:
names = [names]
if level is not None and not level_is_list_like:
level = [level]
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._level_index_from_level(lev) for lev in level]
existing_names = list(self.names)
for i, lev in enumerate(level):
existing_names[lev] = names[i]
names = existing_names
return self._set_names(names=names, inplace=inplace)
def _maybe_materialize_codes_and_levels(self: Self) -> Self:
"""
Set self._codes and self._levels from self._columns _when_ needed.
Factorization of self._columns to self._codes and self._levels is delayed
due to being expensive and sometimes unnecessary for operations.
MultiIndex methods are responsible for calling this when needed.
"""
if self._levels is None and self._codes is None:
levels = []
codes = []
for col in self._data.values():
code, cats = factorize(col)
codes.append(column.as_column(code.astype(np.dtype(np.int64))))
levels.append(cats)
self._levels = levels
self._codes = codes
return self
    @classmethod
    @_performance_tracking
    def _from_data(
        cls,
        data: MutableMapping,
        name: Any = None,
    ) -> Self:
        """
        Use when you have a ColumnAccessor-like mapping but no codes and levels.

        Preferable to use _simple_new if you have codes and levels.

        Parameters
        ----------
        data : MutableMapping
            Mapping of level label -> column; becomes the backing columns.
        name : Any, optional
            Overall index name (kept for 1-D Index compatibility).
        """
        # codes/levels are left as None; they are lazily factorized later by
        # _maybe_materialize_codes_and_levels when first required.
        return cls._simple_new(
            data=ColumnAccessor(data),
            levels=None,
            codes=None,
            names=pd.core.indexes.frozen.FrozenList(data.keys()),
            name=name,
        )
    @property
    def _num_columns(self) -> int:
        # One backing column per level in the ColumnAccessor.
        return len(self._data)
    @_performance_tracking
    def _from_data_like_self(self, data: MutableMapping) -> Self:
        """Build a new MultiIndex from ``data``, carrying over ``self.names``
        when the level count is unchanged."""
        mi = type(self)._from_data(data, name=self.name)
        # Only propagate names if the shape still matches; otherwise the new
        # index keeps the labels derived from ``data``'s keys.
        if mi.nlevels == self.nlevels:
            mi.names = self.names
        return mi
    @classmethod
    def _simple_new(
        cls,
        data: ColumnAccessor,
        levels: list[cudf.Index] | None,
        codes: list[column.ColumnBase] | None,
        names: pd.core.indexes.frozen.FrozenList,
        name: Any = None,
    ) -> Self:
        """
        Use when you have a ColumnAccessor-like mapping, codes, and levels.

        Bypasses ``__init__`` entirely (``object.__new__``) and assigns the
        internal attributes directly; callers are responsible for passing a
        consistent (data, levels, codes) triple. ``levels``/``codes`` may be
        None to defer factorization.
        """
        mi = object.__new__(cls)
        mi._data = data
        mi._levels = levels
        mi._codes = codes
        mi._names = names
        mi._name = name
        return mi
    @property  # type: ignore[explicit-override]
    @_performance_tracking
    def name(self):
        # Name of the index object as a whole; distinct from per-level
        # ``names``. Kept for compatibility with 1-D Index.
        return self._name
    @name.setter
    @_performance_tracking
    def name(self, value):
        # No validation is performed; any object is accepted as the name.
        self._name = value
    @_performance_tracking
    def copy(
        self,
        names=None,
        deep=False,
        name=None,
    ) -> Self:
        """Returns copy of MultiIndex object.

        Returns a copy of `MultiIndex`. The `levels` and `codes` value can be
        set to the provided parameters. When they are provided, the returned
        MultiIndex is always newly constructed.

        Parameters
        ----------
        names : sequence of objects, optional (default None)
            Names for each of the index levels.
        deep : Bool (default False)
            If True, `._data`, `._levels`, `._codes` will be copied. Ignored if
            `levels` or `codes` are specified.
        name : object, optional (default None)
            Kept for compatibility with 1-dimensional Index. Should not
            be used.

        Returns
        -------
        Copy of MultiIndex Instance

        Examples
        --------
        >>> df = cudf.DataFrame({'Close': [3400.00, 226.58, 3401.80, 228.91]})
        >>> idx1 = cudf.MultiIndex(
        ... levels=[['2020-08-27', '2020-08-28'], ['AMZN', 'MSFT']],
        ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
        ... names=['Date', 'Symbol'])
        >>> idx2 = idx1.copy(
        ... names=['col1', 'col2'])
        >>> df.index = idx1
        >>> df
                             Close
        Date       Symbol
        2020-08-27 AMZN    3400.00
                   MSFT     226.58
        2020-08-28 AMZN    3401.80
                   MSFT     228.91
        >>> df.index = idx2
        >>> df
                         Close
        col1       col2
        2020-08-27 AMZN    3400.00
                   MSFT     226.58
        2020-08-28 AMZN    3401.80
                   MSFT     228.91
        """
        # Names override: wrap in FrozenList for pandas compatibility.
        if names is not None:
            names = pd.core.indexes.frozen.FrozenList(names)
        else:
            names = self.names
        # Copy the cached levels/codes if they were materialized, so the
        # copy does not need to re-factorize.
        if self._levels is not None:
            levels: list[cudf.Index] | None = [
                idx.copy(deep=deep) for idx in self._levels
            ]
        else:
            levels = self._levels
        if self._codes is not None:
            codes: list[column.ColumnBase] | None = [
                code.copy(deep=deep) for code in self._codes
            ]
        else:
            codes = self._codes
        return type(self)._simple_new(
            data=self._data.copy(deep=deep),
            levels=levels,
            codes=codes,
            names=names,
            name=name,
        )
@_performance_tracking
def __repr__(self) -> str:
max_seq_items = pd.get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
n = int(max_seq_items / 2) + 1
# TODO: Update the following two arange calls to
# a single arange call once arange has support for
# a vector start/end points.
indices = column.as_column(range(n))
indices = indices.append(
column.as_column(range(len(self) - n, len(self), 1))
)
preprocess = self.take(indices)
else:
preprocess = self
arrays = []
for name, col in zip(self.names, preprocess._columns, strict=True):
try:
pd_idx = col.to_pandas(nullable=True)
except NotImplementedError:
pd_idx = col.to_pandas(nullable=False)
pd_idx.name = name
arrays.append(pd_idx)
preprocess_pd = pd.MultiIndex.from_arrays(arrays)
output = repr(preprocess_pd)
output_prefix = self.__class__.__name__ + "("
output = output.lstrip(output_prefix)
lines = output.split("\n")
if len(lines) > 1:
if "length=" in lines[-1] and len(self) != len(preprocess_pd):
last_line = lines[-1]
length_index = last_line.index("length=")
last_line = last_line[:length_index] + f"length={len(self)})"
lines = lines[:-1]
lines.append(last_line)
data_output = "\n".join(lines)
return output_prefix + data_output
    @property
    @_external_only_api("Use ._codes instead")
    @_performance_tracking
    def codes(self) -> pd.core.indexes.frozen.FrozenList:
        """
        Returns the codes of the underlying MultiIndex.

        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame({'a':[1, 2, 3], 'b':[10, 11, 12]})
        >>> midx = cudf.MultiIndex.from_frame(df)
        >>> midx
        MultiIndex([(1, 10),
                    (2, 11),
                    (3, 12)],
                   names=['a', 'b'])
        >>> midx.codes
        FrozenList([[0, 1, 2], [0, 1, 2]])
        """
        # Ensure the lazily-computed codes exist before exposing them.
        self._maybe_materialize_codes_and_levels()
        return pd.core.indexes.frozen.FrozenList(
            col.values
            for col in self._codes  # type: ignore[union-attr]
        )
    def get_slice_bound(self, label, side):
        """Not supported for MultiIndex; always raises NotImplementedError."""
        raise NotImplementedError(
            "get_slice_bound is not currently implemented."
        )
    @property
    @_performance_tracking
    def levels(self) -> list[cudf.Index]:
        """
        Returns list of levels in the MultiIndex

        Returns
        -------
        List of Index objects

        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame({'a':[1, 2, 3], 'b':[10, 11, 12]})
        >>> cudf.MultiIndex.from_frame(df)
        MultiIndex([(1, 10),
                    (2, 11),
                    (3, 12)],
                   names=['a', 'b'])
        >>> midx = cudf.MultiIndex.from_frame(df)
        >>> midx
        MultiIndex([(1, 10),
                    (2, 11),
                    (3, 12)],
                   names=['a', 'b'])
        >>> midx.levels
        [Index([1, 2, 3], dtype='int64', name='a'), Index([10, 11, 12], dtype='int64', name='b')]
        """
        # Materialize lazily-computed levels, then attach the user-visible
        # level names (the cached Index objects may carry stale names).
        self._maybe_materialize_codes_and_levels()
        return [
            idx.rename(name)  # type: ignore[misc]
            for idx, name in zip(self._levels, self.names, strict=True)  # type: ignore[arg-type]
        ]
    @property  # type: ignore[explicit-override]
    @_performance_tracking
    def ndim(self) -> int:
        """Dimension of the data. For MultiIndex ndim is always 2."""
        return 2
@_performance_tracking
def _get_level_label(self, level):
"""Get name of the level.
Parameters
----------
level : int or level name
if level is name, it will be returned as it is
else if level is index of the level, then level
label will be returned as per the index.
"""
if level in self.names:
return level
else:
return self.names[level]
@_performance_tracking
def isin(self, values, level=None) -> cp.ndarray:
"""Return a boolean array where the index values are in values.
Compute boolean array of whether each index value is found in
the passed set of values. The length of the returned boolean
array matches the length of the index.
Parameters
----------
values : set, list-like, Index or Multi-Index
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index
is a MultiIndex).
Returns
-------
is_contained : cupy array
CuPy array of boolean values.
Notes
-----
When `level` is None, `values` can only be MultiIndex, or a
set/list-like tuples.
When `level` is provided, `values` can be Index or MultiIndex,
or a set/list-like tuples.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> midx = cudf.from_pandas(pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color')))
>>> midx
MultiIndex([(1, 'red'),
(2, 'blue'),
(3, 'green')],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
"""
if level is None:
if isinstance(values, cudf.MultiIndex):
values_idx = values
elif (
(
isinstance(
values,
(
cudf.Series,
cudf.Index,
cudf.DataFrame,
column.ColumnBase,
),
)
)
or (not is_list_like(values))
or (
is_list_like(values)
and len(values) > 0
and not isinstance(values[0], tuple)
)
):
raise TypeError(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
)
else:
values_idx = cudf.MultiIndex.from_tuples(
values, names=self.names
)
self_df = self.to_frame(index=False).reset_index()
values_df = values_idx.to_frame(index=False)
idx = self_df.merge(values_df, how="leftsemi")._data["index"]
res = column.as_column(False, length=len(self))
res[idx] = True
result = res.values
else:
level_series = self.get_level_values(level)
result = level_series.isin(values)
return result
    def where(self, cond, other=None, inplace=False):
        """Not supported for MultiIndex; always raises NotImplementedError."""
        raise NotImplementedError(
            ".where is not supported for MultiIndex operations"
        )
    @_performance_tracking
    def _compute_validity_mask(self, index, row_tuple, max_length):
        """Computes the valid set of indices of values in the lookup"""
        # TODO: A non-slice(None) will probably raise in as_column
        # Build a lookup frame from the non-wildcard components of the
        # row tuple, keyed by level position.
        lookup_dict = {
            i: column.as_column(row)
            for i, row in enumerate(row_tuple)
            if not (isinstance(row, slice) and row == slice(None))
        }
        lookup = cudf.DataFrame._from_data(lookup_dict)
        # The index's own columns, also keyed by level position so the
        # merge below joins on matching levels.
        frame = cudf.DataFrame._from_data(
            ColumnAccessor(
                dict(enumerate(index._columns)),
                verify=False,
            )
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            # Append an "idx" column of row positions so the merge result
            # can be mapped back to positions in the original index.
            data_table = cudf.concat(
                [
                    frame,
                    cudf.DataFrame._from_data(
                        ColumnAccessor(
                            {"idx": column.as_column(range(len(frame)))},
                            verify=False,
                        )
                    ),
                ],
                axis=1,
            )
        # Sort indices in pandas compatible mode
        # because we want the indices to be fetched
        # in a deterministic order.
        # TODO: Remove this after merge/join
        # obtain deterministic ordering.
        if cudf.get_option("mode.pandas_compatible"):
            lookup_order = "_" + "_".join(map(str, lookup._column_names))
            lookup[lookup_order] = column.as_column(range(len(lookup)))
            postprocess = operator.methodcaller(
                "sort_values", by=[lookup_order, "idx"]
            )
        else:
            postprocess = lambda r: r  # noqa: E731
        result = postprocess(lookup.merge(data_table))["idx"]
        # Avoid computing levels unless the result of the merge is empty,
        # which suggests that a KeyError should be raised.
        if len(result) == 0:
            # Identify which component of the tuple is missing so the
            # KeyError names the offending label.
            for idx, row in enumerate(row_tuple):
                if row == slice(None):
                    continue
                if row not in index.levels[idx]._column:
                    raise KeyError(row)
        return result
    @_performance_tracking
    def _get_valid_indices_by_tuple(self, index, row_tuple, max_length):
        """Resolve a label-based key (tuple, slice, or scalar) to row
        positions in ``index``.

        Returns either a column of integer positions or, for a scalar
        numeric key, the key itself.
        """
        # Instructions for Slicing
        # if tuple, get first and last elements of tuple
        # if open beginning tuple, get 0 to highest valid_index
        # if open ending tuple, get highest valid_index to len()
        # if not open end or beginning, get range lowest beginning index
        # to highest ending index
        if isinstance(row_tuple, slice):
            # Purely positional slices (numeric bounds or slice(None)) are
            # resolved arithmetically without touching the data.
            if (
                isinstance(row_tuple.start, numbers.Number)
                or isinstance(row_tuple.stop, numbers.Number)
                or row_tuple == slice(None)
            ):
                stop = row_tuple.stop or max_length
                start, stop, step = row_tuple.indices(stop)
                return column.as_column(range(start, stop, step))
            # Label-based slice: find the positions of the start/stop
            # labels and return the inclusive positional range.
            start_values = self._compute_validity_mask(
                index, row_tuple.start, max_length
            )
            stop_values = self._compute_validity_mask(
                index, row_tuple.stop, max_length
            )
            return column.as_column(
                range(start_values.min(), stop_values.max() + 1)
            )
        elif isinstance(row_tuple, numbers.Number):
            return row_tuple
        return self._compute_validity_mask(index, row_tuple, max_length)
    @_performance_tracking
    def _index_and_downcast(self, result, index, index_key):
        """Post-process an indexing ``result``: drop consumed index levels
        and downcast a single-row DataFrame result to a Series where pandas
        would."""
        if isinstance(index_key, (numbers.Number, slice)):
            index_key = [index_key]
        if (
            len(index_key) > 0 and not isinstance(index_key, tuple)
        ) or isinstance(index_key[0], slice):
            index_key = index_key[0]
        slice_access = isinstance(index_key, slice)
        # Count the last n-k columns where n is the number of columns and k is
        # the length of the indexing tuple
        size = 0
        if not isinstance(index_key, (numbers.Number, slice)):
            size = len(index_key)
        num_selected = max(0, index.nlevels - size)
        # determine if we should downcast from a DataFrame to a Series
        need_downcast = (
            isinstance(result, cudf.DataFrame)
            and len(result) == 1  # only downcast if we have a single row
            and not slice_access  # never downcast if we sliced
            and (
                size == 0  # index_key was an integer
                # we indexed into a single row directly, using its label:
                or len(index_key) == self.nlevels
            )
        )
        if need_downcast:
            result = result.T
            return result[result._column_names[0]]
        if len(result) == 0 and not slice_access:
            # Pandas returns an empty Series with a tuple as name
            # the one expected result column
            result = cudf.Series._from_data(
                {}, name=tuple(col[0] for col in index._columns)
            )
        elif num_selected == 1:
            # If there's only one column remaining in the output index, convert
            # it into an Index and name the final index values according
            # to that column's name.
            *_, last_column = index._data.columns
            index = cudf.Index._from_column(last_column, name=index.names[-1])
        elif num_selected > 1:
            # Otherwise pop the leftmost levels, names, and codes from the
            # source index until it has the correct number of columns (n-k)
            # NOTE(review): the return value of reset_index is discarded here
            # (no inplace/reassignment), so this line appears to be a no-op —
            # confirm whether `result = result.reset_index(drop=True)` was
            # intended.
            result.reset_index(drop=True)
            if index.names is not None:
                result.names = index.names[size:]
            index = MultiIndex(
                levels=index.levels[size:],
                codes=index._codes[size:],
                names=index.names[size:],
            )
        if isinstance(index_key, tuple):
            result.index = index
        return result
    @_performance_tracking
    def _get_row_major(
        self,
        df: DataFrameOrSeries,
        row_tuple: numbers.Number
        | slice
        | tuple[Any, ...]
        | list[tuple[Any, ...]],
    ) -> DataFrameOrSeries:
        """Label-based row selection on ``df`` (whose index is this
        MultiIndex), returning the selected and downcast result."""
        if isinstance(row_tuple, slice):
            if row_tuple.step == 0:
                raise ValueError("slice step cannot be zero")
            # Open-ended label slices are closed with the first/last labels.
            if row_tuple.start is None:
                row_tuple = slice(self[0], row_tuple.stop, row_tuple.step)
            if row_tuple.stop is None:
                row_tuple = slice(row_tuple.start, self[-1], row_tuple.step)
            # Checked after filling bounds so self[0]/self[-1] are covered too.
            if isinstance(row_tuple.start, bool) or isinstance(
                row_tuple.stop, bool
            ):
                raise TypeError(
                    f"{row_tuple}: boolean values can not be used in a slice"
                )
        self._validate_indexer(row_tuple)
        valid_indices = self._get_valid_indices_by_tuple(
            df.index, row_tuple, len(df)
        )
        if isinstance(valid_indices, column.ColumnBase):
            indices = cudf.Series._from_column(valid_indices)
        else:
            indices = cudf.Series(valid_indices)
        result = df.take(indices)
        final = self._index_and_downcast(result, result.index, row_tuple)
        return final
    @_performance_tracking
    def _validate_indexer(
        self,
        indexer: numbers.Number
        | slice
        | tuple[Any, ...]
        | list[tuple[Any, ...]],
    ) -> None:
        """Recursively check that ``indexer`` does not address more levels
        than this MultiIndex has; raises IndexError otherwise."""
        if isinstance(indexer, numbers.Number):
            return
        if isinstance(indexer, tuple):
            # drop any slice(None) from the end:
            indexer = tuple(
                itertools.dropwhile(
                    lambda x: x == slice(None), reversed(indexer)
                )
            )[::-1]
            # now check for size
            if len(indexer) > self.nlevels:
                raise IndexError("Indexer size exceeds number of levels")
        elif isinstance(indexer, slice):
            # Validate both bounds of a label slice.
            self._validate_indexer(indexer.start)
            self._validate_indexer(indexer.stop)
        else:
            # A list of keys: validate each element.
            for i in indexer:
                self._validate_indexer(i)
@_performance_tracking
def __eq__(self, other):
if isinstance(other, MultiIndex):
return np.array(
[
self_col.equals(other_col)
for self_col, other_col in zip(
self._columns, other._columns, strict=True
)
]
)
return NotImplemented
    @property  # type: ignore[explicit-override]
    @_performance_tracking
    def size(self) -> int:
        """Number of rows in the MultiIndex."""
        # The size of a MultiIndex is only dependent on the number of rows.
        return self._num_rows
@_performance_tracking
def take(self, indices) -> Self:
if isinstance(indices, cudf.Series) and indices.has_nulls:
raise ValueError("Column must have no nulls.")
obj = super().take(indices)
obj.names = self.names
return obj
    @_performance_tracking
    def serialize(self):
        """Serialize to a (header, frames) pair, recording the true level
        names in the header."""
        header, frames = super().serialize()
        # Overwrite the names in _data with the true names.
        header["column_names"] = self.names
        return header, frames
    @classmethod
    @_performance_tracking
    def deserialize(cls, header, frames):
        """Reconstruct a MultiIndex from a (header, frames) pair produced by
        ``serialize``."""
        # Spoof the column names to construct the frame, then set manually.
        # (Positional integers avoid collisions with duplicate/None names.)
        column_names = header["column_names"]
        header["column_names"] = range(0, len(column_names))
        obj = super().deserialize(header, frames)
        return obj._set_names(column_names)
    @_performance_tracking
    def __getitem__(self, index):
        """Select rows by integer position(s).

        A scalar integer returns the row as a tuple; a slice or array-like
        returns a new MultiIndex.
        """
        # A scalar integer selection is flattened to a tuple at the end.
        flatten = isinstance(index, int)
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            idx = range(start, stop, step)
        elif is_scalar(index):
            if isinstance(index, float):
                raise IndexError("indexing with a float is disallowed.")
            idx = [index]
        else:
            idx = index
        indexer = column.as_column(idx)
        ca = self._data._from_columns_like_self(
            (col.take(indexer) for col in self._columns), verify=False
        )
        # Gather cached codes if present; levels stay valid as-is since
        # row selection does not change the category sets.
        if self._codes is not None:
            codes = [code.take(indexer) for code in self._codes]
        else:
            codes = self._codes
        result = type(self)._simple_new(
            data=ca, codes=codes, levels=self._levels, names=self.names
        )
        # we are indexing into a single row of the MultiIndex,
        # return that row as a tuple:
        if flatten:
            return result.to_pandas()[0]
        else:
            return result
    @_performance_tracking
    def equals(self, other) -> bool:
        # Delegate to Frame.equals for full column-wise comparison.
        return Frame.equals(self, other)
    @_performance_tracking
    def to_arrow(self) -> pa.Table:
        """Convert the MultiIndex's level columns to a pyarrow Table."""
        return Frame.to_arrow(self)
    @_performance_tracking
    def to_frame(
        self,
        index: bool = True,
        name=no_default,
        allow_duplicates: bool = False,
    ) -> DataFrame:
        """
        Create a DataFrame with the levels of the MultiIndex as columns.

        Column ordering is determined by the DataFrame constructor with data as
        a dict.

        Parameters
        ----------
        index : bool, default True
            Set the index of the returned DataFrame as the original MultiIndex.
        name : list / sequence of str, optional
            The passed names should substitute index level names.
        allow_duplicates : bool, optional default False
            Allow duplicate column labels to be created. Note
            that this parameter is non-functional because
            duplicates column labels aren't supported in cudf.

        Returns
        -------
        DataFrame

        Examples
        --------
        >>> import cudf
        >>> mi = cudf.MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
        >>> mi
        MultiIndex([('a', 'c'),
                    ('b', 'd')],
                   )
        >>> df = mi.to_frame()
        >>> df
             0  1
        a c  a  c
        b d  b  d
        >>> df = mi.to_frame(index=False)
        >>> df
           0  1
        0  a  c
        1  b  d
        >>> df = mi.to_frame(name=['x', 'y'])
        >>> df
             x  y
        a c  a  c
        b d  b  d
        """
        if name is no_default:
            # Default column labels: the level name, or the level's position
            # when the name is None (matching pandas).
            column_names = [
                level if name is None else name
                for level, name in enumerate(self.names)
            ]
        elif not is_list_like(name):
            raise TypeError(
                "'name' must be a list / sequence of column names."
            )
        elif len(name) != self.nlevels:
            raise ValueError(
                "'name' should have the same length as "
                "number of levels on index."
            )
        else:
            column_names = name
        if len(column_names) != len(set(column_names)):
            raise ValueError("Duplicate column names are not allowed")
        ca = ColumnAccessor(
            dict(
                zip(
                    column_names,
                    (col.copy() for col in self._columns),
                    strict=True,
                )
            ),
            verify=False,
        )
        return cudf.DataFrame._from_data(
            data=ca, index=self if index else None
        )
    @_performance_tracking
    def _level_to_ca_label(self, level) -> tuple[Hashable, int]:
        """
        Convert a level to a ColumnAccessor label and an integer position.

        Useful if self._column_names != self.names.

        Parameters
        ----------
        level : int or label

        Returns
        -------
        tuple[Hashable, int]
            (ColumnAccessor label corresponding to level, integer position of the level)

        Raises
        ------
        IndexError
            If ``level`` is an out-of-range integer position.
        KeyError
            If ``level`` matches neither a column label nor a level name.
        """
        colnames = self._column_names
        try:
            # Fast path: level is already a ColumnAccessor label.
            level_idx = colnames.index(level)
        except ValueError:
            if isinstance(level, int):
                # Positional lookup, with negative-index support.
                if level < 0:
                    level = level + len(colnames)
                if level < 0 or level >= len(colnames):
                    raise IndexError(f"Invalid level number: '{level}'")
                level_idx = level
                level = colnames[level_idx]
            elif level in self.names:
                # Lookup by user-visible level name.
                level_idx = list(self.names).index(level)
                level = colnames[level_idx]
            else:
                raise KeyError(f"Level not found: '{level}'")
        return level, level_idx
    @_performance_tracking
    def get_level_values(self, level) -> Index:
        """
        Return the values at the requested level

        Parameters
        ----------
        level : int or label

        Returns
        -------
        An Index containing the values at the requested level.
        """
        # Resolve label/position, then wrap the level's backing column in an
        # Index named after the user-visible level name.
        level, level_idx = self._level_to_ca_label(level)
        level_values = cudf.Index._from_column(
            self._data[level], name=self.names[level_idx]
        )
        return level_values
    # A MultiIndex holds tuples of mixed per-level dtypes, so none of the
    # scalar dtype classifications below ever apply.
    def _is_numeric(self) -> bool:
        return False

    def _is_boolean(self) -> bool:
        return False

    def _is_integer(self) -> bool:
        return False

    def _is_floating(self) -> bool:
        return False

    def _is_object(self) -> bool:
        return False

    def _is_categorical(self) -> bool:
        return False

    def _is_interval(self) -> bool:
        return False
    @classmethod
    @_performance_tracking
    def _concat(cls, objs) -> Self:
        """Concatenate multiple MultiIndex objects row-wise into one."""
        # TODO: This will discard previously computed self._codes and self._levels.
        # Try preserving them if defined.
        source_data = [o.to_frame(index=False) for o in objs]
        # TODO: Verify if this is really necessary or if we can rely on
        # DataFrame._concat.
        if len(source_data) > 1:
            # Align all frames to the first frame's column labels so the
            # concat matches columns positionally.
            colnames = source_data[0]._data.to_pandas_index
            for obj in source_data[1:]:
                obj.columns = colnames
        source_df = cudf.DataFrame._concat(source_data)
        try:
            # Only set names if all objs have the same names
            (names,) = {o.names for o in objs} - {None}
        except ValueError:
            names = [None] * source_df._num_columns
        return cudf.MultiIndex.from_frame(source_df, names=names)
    @classmethod
    @_performance_tracking
    def from_tuples(
        cls, tuples, sortorder: int | None = None, names=None
    ) -> Self:
        """
        Convert list of tuples to MultiIndex.

        Parameters
        ----------
        tuples : list / sequence of tuple-likes
            Each tuple is the index of one row/column.
        sortorder : int or None
            Level of sortedness (must be lexicographically sorted by that
            level).
        names : list / sequence of str, optional
            Names for the levels in the index.

        Returns
        -------
        MultiIndex

        See Also
        --------
        MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
        MultiIndex.from_product : Make a MultiIndex from cartesian product
                                  of iterables.
        MultiIndex.from_frame : Make a MultiIndex from a DataFrame.

        Examples
        --------
        >>> tuples = [(1, 'red'), (1, 'blue'),
        ...           (2, 'red'), (2, 'blue')]
        >>> cudf.MultiIndex.from_tuples(tuples, names=('number', 'color'))
        MultiIndex([(1,  'red'),
                    (1, 'blue'),
                    (2,  'red'),
                    (2, 'blue')],
                   names=['number', 'color'])
        """
        # Use Pandas for handling Python host objects
        pdi = pd.MultiIndex.from_tuples(
            tuples, sortorder=sortorder, names=names
        )
        return cls(levels=pdi.levels, codes=pdi.codes, names=pdi.names)
    @property
    def dtypes(self) -> pd.Series:
        """
        Return the dtypes as a Series for the underlying MultiIndex.

        Examples
        --------
        >>> import cudf
        >>> idx = cudf.MultiIndex.from_product([(0, 1, 2), ('green', 'purple')],
        ...                                    names=['number', 'color'])
        >>> idx
        MultiIndex([(0,  'green'),
                    (0, 'purple'),
                    (1,  'green'),
                    (1, 'purple'),
                    (2,  'green'),
                    (2, 'purple')],
                   names=['number', 'color'])
        >>> idx.dtypes
        number     int64
        color     object
        dtype: object
        """
        # Not using DataFrame.dtypes to avoid expensive invocation of `._data.to_pandas_index`
        return pd.Series(dict(self.to_frame()._dtypes))
    @_performance_tracking
    def to_numpy(self) -> np.ndarray:
        """Return a host (numpy) object array of row tuples."""
        return self.values_host
    def to_flat_index(self):
        """
        Convert a MultiIndex to an Index of Tuples containing the level values.

        This is not currently implemented
        """
        # TODO: Could implement as Index of ListDtype?
        raise NotImplementedError("to_flat_index is not currently supported.")
    @property
    @_performance_tracking
    def values_host(self) -> np.ndarray:
        """
        Return a numpy representation of the MultiIndex.

        Only the values in the MultiIndex will be returned.

        Returns
        -------
        out : numpy.ndarray
            The values of the MultiIndex.

        Examples
        --------
        >>> import cudf
        >>> midx = cudf.MultiIndex(
        ...     levels=[[1, 3, 4, 5], [1, 2, 5]],
        ...     codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
        ...     names=["x", "y"],
        ... )
        >>> midx.values_host
        array([(1, 1), (1, 5), (3, 2), (4, 2), (5, 1)], dtype=object)
        >>> type(midx.values_host)
        <class 'numpy.ndarray'>
        """
        # Round-trips through pandas; yields an object array of row tuples.
        return self.to_pandas().values
    @property
    @_performance_tracking
    def values(self) -> cp.ndarray:
        """
        Return a CuPy representation of the MultiIndex.

        Only the values in the MultiIndex will be returned.

        Returns
        -------
        out: cupy.ndarray
            The values of the MultiIndex.

        Examples
        --------
        >>> import cudf
        >>> midx = cudf.MultiIndex(
        ...     levels=[[1, 3, 4, 5], [1, 2, 5]],
        ...     codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
        ...     names=["x", "y"],
        ... )
        >>> midx.values
        array([[1, 1],
               [1, 5],
               [3, 2],
               [4, 2],
               [5, 1]])
        >>> type(midx.values)
        <class 'cupy...ndarray'>
        """
        # pandas returns an object array of tuples here, which cupy
        # cannot represent; refuse rather than silently differ.
        if cudf.get_option("mode.pandas_compatible"):
            raise NotImplementedError(
                "Unable to create a cupy array with tuples."
            )
        return Frame.to_cupy(self)
    @classmethod
    @_performance_tracking
    def from_arrow(cls, data: pa.Table) -> Self:
        """Construct a MultiIndex from a pyarrow Table (one level per column)."""
        return cls._from_data(Frame.from_arrow(data)._data)
    @classmethod
    @_performance_tracking
    def from_frame(
        cls,
        df: pd.DataFrame | DataFrame,
        sortorder: int | None = None,
        names=None,
    ) -> Self:
        """
        Make a MultiIndex from a DataFrame.

        Parameters
        ----------
        df : DataFrame
            DataFrame to be converted to MultiIndex.
        sortorder : int, optional
            Level of sortedness (must be lexicographically sorted by that
            level).
        names : list-like, optional
            If no names are provided, use the column names, or tuple of column
            names if the columns is a MultiIndex. If a sequence, overwrite
            names with the given sequence.

        Returns
        -------
        MultiIndex
            The MultiIndex representation of the given DataFrame.

        See Also
        --------
        MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
        MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
        MultiIndex.from_product : Make a MultiIndex from cartesian product
                                  of iterables.

        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
        ...                      ['NJ', 'Temp'], ['NJ', 'Precip']],
        ...                     columns=['a', 'b'])
        >>> df
            a       b
        0  HI    Temp
        1  HI  Precip
        2  NJ    Temp
        3  NJ  Precip
        >>> cudf.MultiIndex.from_frame(df)
        MultiIndex([('HI',   'Temp'),
                    ('HI', 'Precip'),
                    ('NJ',   'Temp'),
                    ('NJ', 'Precip')],
                   names=['a', 'b'])

        Using explicit names, instead of the column names

        >>> cudf.MultiIndex.from_frame(df, names=['state', 'observation'])
        MultiIndex([('HI',   'Temp'),
                    ('HI', 'Precip'),
                    ('NJ',   'Temp'),
                    ('NJ', 'Precip')],
                   names=['state', 'observation'])
        """
        # Normalize the input to a cudf DataFrame first.
        if isinstance(df, pd.DataFrame):
            source_data = cudf.DataFrame(df)
        elif isinstance(df, cudf.DataFrame):
            source_data = df
        else:
            raise TypeError("Input must be a pandas or cudf DataFrame.")
        names = names if names is not None else source_data._column_names
        return cls.from_arrays(
            source_data._columns, sortorder=sortorder, names=names
        )
    @classmethod
    @_performance_tracking
    def from_product(
        cls, iterables, sortorder: int | None = None, names=None
    ) -> Self:
        """
        Make a MultiIndex from the cartesian product of multiple iterables.

        Parameters
        ----------
        iterables : list / sequence of iterables
            Each iterable has unique labels for each level of the index.
        sortorder : int or None
            Level of sortedness (must be lexicographically sorted by that
            level).
        names : list / sequence of str, optional
            Names for the levels in the index.
            If not explicitly provided, names will be inferred from the
            elements of iterables if an element has a name attribute

        Returns
        -------
        MultiIndex

        See Also
        --------
        MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
        MultiIndex.from_frame : Make a MultiIndex from a DataFrame.

        Examples
        --------
        >>> numbers = [0, 1, 2]
        >>> colors = ['green', 'purple']
        >>> cudf.MultiIndex.from_product([numbers, colors],
        ...                              names=['number', 'color'])
        MultiIndex([(0,  'green'),
                    (0, 'purple'),
                    (1,  'green'),
                    (1, 'purple'),
                    (2,  'green'),
                    (2, 'purple')],
                   names=['number', 'color'])
        """
        # Use Pandas for handling Python host objects
        pdi = pd.MultiIndex.from_product(
            iterables, sortorder=sortorder, names=names
        )
        return cls(levels=pdi.levels, codes=pdi.codes, names=pdi.names)
    @classmethod
    @_performance_tracking
    def from_arrays(
        cls,
        arrays,
        sortorder=None,
        names=None,
    ) -> Self:
        """
        Convert arrays to MultiIndex.

        Parameters
        ----------
        arrays : list / sequence of array-likes
            Each array-like gives one level's value for each data point.
            len(arrays) is the number of levels.
        sortorder : optional int
            Not yet supported
        names : list / sequence of str, optional
            Names for the levels in the index.

        Returns
        -------
        MultiIndex

        See Also
        --------
        MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
        MultiIndex.from_product : Make a MultiIndex from cartesian product
                                  of iterables.
        MultiIndex.from_frame : Make a MultiIndex from a DataFrame.

        Examples
        --------
        >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
        >>> cudf.MultiIndex.from_arrays(arrays, names=('number', 'color'))
        MultiIndex([(1,  'red'),
                    (1, 'blue'),
                    (2,  'red'),
                    (2, 'blue')],
                   names=['number', 'color'])
        """
        error_msg = "Input must be a list / sequence of array-likes."
        if not is_list_like(arrays):
            raise TypeError(error_msg)
        codes = []
        levels = []
        names_from_arrays = []
        for array in arrays:
            if not (is_list_like(array) or is_column_like(array)):
                raise TypeError(error_msg)
            # Factorize each level: sorted unique values become the level,
            # integer positions into it become the codes.
            code, level = factorize(array, sort=True)
            codes.append(code)
            levels.append(level)
            names_from_arrays.append(getattr(array, "name", None))
        # Fall back to any names carried by the arrays themselves.
        if names is None:
            names = names_from_arrays
        return cls(
            codes=codes, levels=levels, sortorder=sortorder, names=names
        )
    @_performance_tracking
    def swaplevel(self, i=-2, j=-1) -> Self:
        """
        Swap level i with level j.

        Calling this method does not change the ordering of the values.

        Parameters
        ----------
        i : int or str, default -2
            First level of index to be swapped.
        j : int or str, default -1
            Second level of index to be swapped.

        Returns
        -------
        MultiIndex
            A new MultiIndex.

        Examples
        --------
        >>> import cudf
        >>> mi = cudf.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
        ...                      codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> mi
        MultiIndex([('a', 'bb'),
                    ('a', 'aa'),
                    ('b', 'bb'),
                    ('b', 'aa')],
                   )
        >>> mi.swaplevel(0, 1)
        MultiIndex([('bb', 'a'),
                    ('aa', 'a'),
                    ('bb', 'b'),
                    ('aa', 'b')],
                   )
        """
        # Resolve positional levels to their ColumnAccessor labels.
        name_i = self._column_names[i] if isinstance(i, int) else i
        name_j = self._column_names[j] if isinstance(j, int) else j
        to_swap = {name_i, name_j}
        new_data = {}
        # TODO: Preserve self._codes and self._levels if set
        # Rebuild the column mapping with i's and j's columns exchanged,
        # keeping all other levels in their original order.
        for k, v in self._column_labels_and_values:
            if k not in to_swap:
                new_data[k] = v
            elif k == name_i:
                new_data[name_j] = self._data[name_j]
            elif k == name_j:
                new_data[name_i] = self._data[name_i]
        midx = type(self)._from_data(new_data)
        # When all names are None the labels above are positional, so the
        # None names must be restored explicitly.
        if all(n is None for n in self.names):
            midx = midx.set_names(self.names)
        return midx
    @_performance_tracking
    def droplevel(self, level=-1) -> Self | Index:
        """
        Removes the specified levels from the MultiIndex.

        Parameters
        ----------
        level : level name or index, list-like
            Integer, name or list of such, specifying one or more
            levels to drop from the MultiIndex

        Returns
        -------
        A MultiIndex or Index object, depending on the number of remaining
        levels.

        Examples
        --------
        >>> import cudf
        >>> idx = cudf.MultiIndex.from_frame(
        ...     cudf.DataFrame(
        ...         {
        ...             "first": ["a", "a", "a", "b", "b", "b"],
        ...             "second": [1, 1, 2, 2, 3, 3],
        ...             "third": [0, 1, 2, 0, 1, 2],
        ...         }
        ...     )
        ... )

        Dropping level by index:

        >>> idx.droplevel(0)
        MultiIndex([(1, 0),
                    (1, 1),
                    (2, 2),
                    (2, 0),
                    (3, 1),
                    (3, 2)],
                   names=['second', 'third'])

        Dropping level by name:

        >>> idx.droplevel("first")
        MultiIndex([(1, 0),
                    (1, 1),
                    (2, 2),
                    (2, 0),
                    (3, 1),
                    (3, 2)],
                   names=['second', 'third'])

        Dropping multiple levels:

        >>> idx.droplevel(["first", "second"])
        Index([0, 1, 2, 0, 1, 2], dtype='int64', name='third')
        """
        if is_scalar(level):
            level = (level,)
        elif len(level) == 0:
            # Nothing to drop.
            return self
        new_names = list(self.names)
        new_data = self._data.copy(deep=False)
        # Pop from the highest position down so earlier pops don't shift
        # the positions of levels yet to be removed.
        for i in sorted(
            (self._level_index_from_level(lev) for lev in level), reverse=True
        ):
            new_names.pop(i)
            new_data.pop(self._data.names[i])
        # Downcast to a flat Index when a single level remains (pandas
        # behavior).
        if len(new_data) == 1:
            return Index._from_column(
                next(iter(new_data.values())), name=new_names[0]
            )
        else:
            mi = type(self)._from_data(new_data)
            mi.names = new_names
            return mi
    @_performance_tracking
    def to_pandas(
        self, *, nullable: bool = False, arrow_type: bool = False
    ) -> pd.MultiIndex:
        """Convert to a :class:`pandas.MultiIndex`, translating cudf's
        missing-value code to pandas' ``-1`` sentinel."""
        # cudf uses np.iinfo(SIZE_TYPE_DTYPE).min as missing code
        # pandas uses -1 as missing code
        self._maybe_materialize_codes_and_levels()
        pd_codes = (
            code.find_and_replace(
                column.as_column(np.iinfo(SIZE_TYPE_DTYPE).min, length=1),
                column.as_column(-1, length=1),
            )
            for code in self._codes  # type: ignore[union-attr]
        )
        return pd.MultiIndex(
            levels=[
                level.to_pandas(nullable=nullable, arrow_type=arrow_type)
                for level in self.levels
            ],
            codes=[col.values_host for col in pd_codes],
            names=self.names,
        )
    @classmethod
    @_performance_tracking
    def from_pandas(
        cls, multiindex: pd.MultiIndex, nan_as_null=no_default
    ) -> Self:
        """
        Convert from a Pandas MultiIndex

        .. deprecated::
            Pass the MultiIndex names, codes and levels to the MultiIndex
            constructor instead.

        Raises
        ------
        TypeError for invalid input type.

        Examples
        --------
        >>> import cudf
        >>> import pandas as pd
        >>> pmi = pd.MultiIndex(levels=[['a', 'b'], ['c', 'd']],
        ...                     codes=[[0, 1], [1, 1]])
        >>> cudf.from_pandas(pmi)
        MultiIndex([('a', 'd'),
                    ('b', 'd')],
                   )
        """
        warnings.warn(
            "from_pandas is deprecated and will be removed in a future version. "
            "Pass the MultiIndex names, codes and levels to the MultiIndex constructor instead.",
            FutureWarning,
        )
        if not isinstance(multiindex, pd.MultiIndex):
            raise TypeError("not a pandas.MultiIndex")
        if nan_as_null is no_default:
            # pandas-compatible mode keeps NaN as a value; otherwise let the
            # constructor decide per-column.
            nan_as_null = (
                False if cudf.get_option("mode.pandas_compatible") else None
            )
        return cls(
            levels=multiindex.levels,
            codes=multiindex.codes,
            names=multiindex.names,
            nan_as_null=nan_as_null,
        )
@cached_property # type: ignore[explicit-override]
@_performance_tracking
def is_unique(self) -> bool:
return len(self) == self.nunique(dropna=False)
@property
def dtype(self) -> np.dtype:
return np.dtype("O")
@_performance_tracking
def _is_sorted(self, ascending: bool) -> bool:
return sorting.is_sorted(
self._columns,
ascending=itertools.repeat(ascending, times=self._num_columns),
na_position=itertools.repeat("first", times=self._num_columns),
)
@cached_property
@_performance_tracking
def is_monotonic_increasing(self) -> bool:
"""
Return if the index is monotonic increasing
(only equal or increasing) values.
"""
return self._is_sorted(True)
@cached_property
@_performance_tracking
def is_monotonic_decreasing(self) -> bool:
"""
Return if the index is monotonic decreasing
(only equal or decreasing) values.
"""
return self._is_sorted(False)
@_performance_tracking
def fillna(self, value) -> Self:
"""
Fill null values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill nulls. This value cannot be a
list-likes.
Returns
-------
filled : MultiIndex
Examples
--------
>>> import cudf
>>> index = cudf.MultiIndex(
... levels=[["a", "b", "c", None], ["1", None, "5"]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> index
MultiIndex([( 'a', '1'),
( 'a', '5'),
( 'b', <NA>),
( 'c', <NA>),
(<NA>, '1')],
names=['x', 'y'])
>>> index.fillna('hello')
MultiIndex([( 'a', '1'),
( 'a', '5'),
( 'b', 'hello'),
( 'c', 'hello'),
('hello', '1')],
names=['x', 'y'])
"""
return super().fillna(value=value)
@_performance_tracking
def unique(self, level: int | None = None) -> Self | Index:
if level is None:
return self.drop_duplicates(keep="first")
else:
return self.get_level_values(level).unique()
@_performance_tracking
def nunique(self, dropna: bool = True) -> int:
mi = self.dropna(how="all") if dropna else self
return len(mi.unique())
@_performance_tracking
def memory_usage(self, deep: bool = False) -> int:
usage = sum(col.memory_usage for col in self._columns)
if self._levels is not None:
usage += sum(
level.memory_usage(deep=deep) for level in self._levels
)
if self._codes is not None:
usage += sum(code.memory_usage for code in self._codes)
return usage
@_performance_tracking
def difference(self, other, sort=None) -> Self:
if hasattr(other, "to_pandas"):
other = other.to_pandas()
return cudf.from_pandas(self.to_pandas().difference(other, sort))
@_performance_tracking
def append(self, other) -> Self:
"""
Append a collection of MultiIndex objects together
Parameters
----------
other : MultiIndex or list/tuple of MultiIndex objects
Returns
-------
appended : Index
Examples
--------
>>> import cudf
>>> idx1 = cudf.MultiIndex(
... levels=[[1, 2], ['blue', 'red']],
... codes=[[0, 0, 1, 1], [1, 0, 1, 0]]
... )
>>> idx2 = cudf.MultiIndex(
... levels=[[3, 4], ['blue', 'red']],
... codes=[[0, 0, 1, 1], [1, 0, 1, 0]]
... )
>>> idx1
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
)
>>> idx2
MultiIndex([(3, 'red'),
(3, 'blue'),
(4, 'red'),
(4, 'blue')],
)
>>> idx1.append(idx2)
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue'),
(3, 'red'),
(3, 'blue'),
(4, 'red'),
(4, 'blue')],
)
"""
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
for obj in to_concat:
if not isinstance(obj, MultiIndex):
raise TypeError(
f"all objects should be of type "
f"MultiIndex for MultiIndex.append, "
f"found object of type: {type(obj)}"
)
return type(self)._concat(to_concat)
@_performance_tracking
def __array_function__(self, func, types, args, kwargs):
cudf_df_module = MultiIndex
for submodule in func.__module__.split(".")[1:]:
# point cudf to the correct submodule
if hasattr(cudf_df_module, submodule):
cudf_df_module = getattr(cudf_df_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [cudf_df_module, np.ndarray]
for t in types:
if t not in handled_types:
return NotImplemented
if hasattr(cudf_df_module, fname):
cudf_func = getattr(cudf_df_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
return cudf_func(*args, **kwargs)
else:
return NotImplemented
def _level_index_from_level(self, level) -> int:
"""
Return level index from given level name or index
"""
try:
return self.names.index(level)
except ValueError:
if not is_integer(level):
raise KeyError(f"Level {level} not found")
if level < 0:
level += self.nlevels
if level >= self.nlevels:
raise IndexError(
f"Level {level} out of bounds. "
f"Index has {self.nlevels} levels."
) from None
return level
@_performance_tracking
def get_indexer(self, target, method=None, limit=None, tolerance=None):
if tolerance is not None:
raise NotImplementedError(
"Parameter tolerance is not supported yet."
)
if method == "nearest":
raise NotImplementedError(
f"{method=} is not supported yet for MultiIndex."
)
if method in {"ffill", "bfill", "pad", "backfill"} and not (
self.is_monotonic_increasing or self.is_monotonic_decreasing
):
raise ValueError(
"index must be monotonic increasing or decreasing"
)
result = column.as_column(
-1,
length=len(target),
dtype=SIZE_TYPE_DTYPE,
)
if not len(self):
return self._return_get_indexer_result(result.values)
try:
target = cudf.MultiIndex.from_tuples(target)
except TypeError as e:
if isinstance(e, MixedTypeError):
raise e
return self._return_get_indexer_result(result.values)
join_keys = [
_match_join_keys(lcol, rcol, "inner")
for lcol, rcol in zip(target._columns, self._columns, strict=True)
]
join_keys = map(list, zip(*join_keys, strict=True))
with acquire_spill_lock():
plc_tables = [
plc.Table([col.to_pylibcudf(mode="read") for col in cols])
for cols in join_keys
]
left_plc, right_plc = plc.join.inner_join(
plc_tables[0],
plc_tables[1],
plc.types.NullEquality.EQUAL,
)
scatter_map = ColumnBase.from_pylibcudf(left_plc)
indices = ColumnBase.from_pylibcudf(right_plc)
result_series = cudf.Series._from_column(
result._scatter_by_column(scatter_map, indices)
)
if method in {"ffill", "bfill", "pad", "backfill"}:
result_series = _get_indexer_basic(
index=self,
positions=result_series,
method=method,
target_col=target.to_frame(index=False)[
list(range(0, self.nlevels))
],
tolerance=tolerance,
)
elif method is not None:
raise ValueError(
f"{method=} is unsupported, only supported values are: "
"{['ffill'/'pad', 'bfill'/'backfill', None]}"
)
return self._return_get_indexer_result(result_series.to_cupy())
@_performance_tracking
def get_loc(self, key):
is_sorted = (
self.is_monotonic_increasing or self.is_monotonic_decreasing
)
is_unique = self.is_unique
key = (key,) if not isinstance(key, tuple) else key
# Handle partial key search. If length of `key` is less than `nlevels`,
# Only search levels up to `len(key)` level.
partial_index = self.__class__._from_data(
data=self._data.select_by_index(slice(len(key)))
)
(
lower_bound,
upper_bound,
sort_inds,
) = _lexsorted_equal_range(
partial_index,
[column.as_column(k, length=1) for k in key],
is_sorted,
)
if lower_bound == upper_bound:
raise KeyError(key)
if is_unique and lower_bound + 1 == upper_bound:
# Indices are unique (Pandas constraint), search result is unique,
# return int.
return (
lower_bound
if is_sorted
else sort_inds.element_indexing(lower_bound)
)
if is_sorted:
# In monotonic index, lex search result is continuous. A slice for
# the range is returned.
return slice(lower_bound, upper_bound)
true_inds = sort_inds.slice(lower_bound, upper_bound).values
true_inds = _maybe_indices_to_slice(true_inds)
if isinstance(true_inds, slice):
return true_inds
# Not sorted and not unique. Return a boolean mask
mask = cp.full(len(self), False)
mask[true_inds] = True
return mask
def _get_reconciled_name_object(self, other) -> Self:
"""
If the result of a set operation will be self,
return self, unless the names change, in which
case make a shallow copy of self.
"""
names = self._maybe_match_names(other)
if self.names != names:
return self.rename(names)
return self
def _maybe_match_names(self, other) -> list[Hashable]:
"""
Try to find common names to attach to the result of an operation
between a and b. Return a consensus list of names if they match
at least partly or list of None if they have completely
different names.
"""
if self.nlevels != other.nlevels:
return [None] * self.nlevels
return [
self_name if _is_same_name(self_name, other_name) else None
for self_name, other_name in zip(
self.names, other.names, strict=True
)
]
@_performance_tracking
def union(self, other, sort=None) -> Self:
if not isinstance(other, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if not len(other) or self.equals(other):
return self._get_reconciled_name_object(other)
elif not len(self):
return other._get_reconciled_name_object(self)
return self._union(other, sort=sort)
@_performance_tracking
def _union(self, other, sort=None) -> Self:
# TODO: When to_frame is refactored to return a
# deep copy in future, we should push most of the common
# logic between MultiIndex._union & Index._union into
# Index._union.
other_df = other.copy(deep=True).to_frame(index=False)
self_df = self.copy(deep=True).to_frame(index=False)
col_names = list(range(0, self.nlevels))
self_df.columns = col_names
other_df.columns = col_names
self_df["order"] = self_df.index
other_df["order"] = other_df.index
result_df = self_df.merge(other_df, on=col_names, how="outer")
result_df = result_df.sort_values(
by=result_df._data.to_pandas_index[self.nlevels :],
ignore_index=True,
)
midx = type(self)._from_data(result_df.iloc[:, : self.nlevels]._data)
midx.names = self.names if self.names == other.names else None
if sort in {None, True} and len(other):
return midx.sort_values()
return midx
@_performance_tracking
def _intersection(self, other, sort=None) -> Self:
if self.names != other.names:
deep = True
col_names = list(range(0, self.nlevels))
res_name = (None,) * self.nlevels
else:
deep = False
col_names = None
res_name = self.names
other_df = other.copy(deep=deep).to_frame(index=False)
self_df = self.copy(deep=deep).to_frame(index=False)
if col_names is not None:
other_df.columns = col_names
self_df.columns = col_names
result_df = cudf.merge(self_df, other_df, how="inner")
midx = type(self)._from_data(result_df._data)
midx.names = res_name
if sort in {None, True} and len(other):
return midx.sort_values()
return midx
@_performance_tracking
def _copy_type_metadata(self: Self, other: Self) -> Self:
res = super()._copy_type_metadata(other)
if isinstance(other, MultiIndex):
res._names = other._names
return res
@_performance_tracking
def _split_columns_by_levels(
self, levels: tuple, *, in_levels: bool
) -> Generator[tuple[Any, column.ColumnBase], None, None]:
# This function assumes that for levels with duplicate names, they are
# specified by indices, not name by ``levels``. E.g. [None, None] can
# only be specified by 0, 1, not "None".
level_names = list(self.names)
level_indices = {
lv if isinstance(lv, int) else level_names.index(lv)
for lv in levels
}
for i, (name, col) in enumerate(
zip(self.names, self._columns, strict=True)
):
if in_levels and i in level_indices:
name = f"level_{i}" if name is None else name
yield name, col
elif not in_levels and i not in level_indices:
yield name, col
@_performance_tracking
def _new_index_for_reset_index(
self, levels: tuple | None, name
) -> None | Index:
"""Return the new index after .reset_index"""
if levels is None:
return None
index_columns, index_names = [], []
for name, col in self._split_columns_by_levels(
levels, in_levels=False
):
index_columns.append(col)
index_names.append(name)
if not index_columns:
# None is caught later to return RangeIndex
return None
index = _index_from_data(
dict(enumerate(index_columns)),
name=name,
)
if isinstance(index, type(self)):
index.names = index_names
else:
index.name = index_names[0]
return index
def _columns_for_reset_index(
self, levels: tuple | None
) -> Generator[tuple[Any, column.ColumnBase], None, None]:
"""Return the columns and column names for .reset_index"""
if levels is None:
for i, (col, name) in enumerate(
zip(self._columns, self.names, strict=True)
):
yield f"level_{i}" if name is None else name, col
else:
yield from self._split_columns_by_levels(levels, in_levels=True)
def repeat(self, repeats, axis=None) -> Self:
return self._from_data(
self._data._from_columns_like_self(
self._repeat(self._columns, repeats, axis)
)
)
@_performance_tracking
@_warn_no_dask_cudf
def __dask_tokenize__(self):
return Frame.__dask_tokenize__(self)
| MultiIndex |
python | pytorch__pytorch | test/dynamo/test_bytecode_utils.py | {
"start": 270,
"end": 21015
} | class ____(torch._dynamo.test_case.TestCase):
@skipIfNotPy311
def test_linetable_311_writer1(self):
def fn():
a = 10
b = 20
# prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g()
c = a + g(b)
f = "linetable_writer"
return f"Test if {f} generates correct co_linetable: {c}"
keys = bytecode_transformation.get_code_keys()
code_options = {k: getattr(fn.__code__, k) for k in keys}
result = bytecode_transformation.clean_and_assemble_instructions(
bytecode_transformation.cleaned_instructions(fn.__code__),
keys,
code_options,
)
l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions())
self.assertEqual(len(l1), len(l2))
for p1, p2 in zip(l1, l2):
self.assertEqual(p1, p2)
# TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14
# In 3.11+,. it is computed lazily from other linetable attributes (e.g. co_linetable),
# so we do not set this attribute ourselves.
self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
@skipIfNotPy311
def test_linetable_311_writer2(self):
"""
test large ops (LOAD_METHOD) and EXTENDED_ARGS
fn_str is in the form:
def fn():
...
x0 = 1
x1 = 1
...
l = [x0, x1, ...]
"""
fn_str = f"""\
def fn():
foo.bar(1, 2, 3)
{str(chr(10)).join(" " * 4 + "x" + str(i) + " = 1" for i in range(1 << 9))}
l = [{" ".join("x" + str(i) + "," for i in range(1 << 9))}]
"""
locals = {}
exec(fn_str, {}, locals)
fn = locals["fn"]
orig_inst_str = "\n".join(list(map(str, dis.get_instructions(fn))))
self.assertIn("EXTENDED_ARG", orig_inst_str)
load_method_str = "LOAD_ATTR" if sys.version_info >= (3, 12) else "LOAD_METHOD"
self.assertIn(load_method_str, orig_inst_str)
keys = bytecode_transformation.get_code_keys()
code_options = {k: getattr(fn.__code__, k) for k in keys}
result = bytecode_transformation.clean_and_assemble_instructions(
bytecode_transformation.cleaned_instructions(fn.__code__),
keys,
code_options,
)
new_inst_str = "\n".join(list(map(str, result[0])))
self.assertIn("EXTENDED_ARG", new_inst_str)
self.assertIn(load_method_str, new_inst_str)
l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions())
self.assertEqual(len(l1), len(l2))
for p1, p2 in zip(l1, l2):
self.assertEqual(p1, p2)
self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
@unittest.skipIf(
sys.version_info >= (3, 11),
"linetable test for Python 3.10",
)
def test_linetable_310_writer(self):
def fn():
a = 10
b = 20
c = a + b
f = "linetable_writer"
return f"Test if {f} generates correct co_linetable: {c}"
inst = dis.get_instructions(fn)
result = bytecode_transformation.assemble(inst, fn.__code__.co_firstlineno)
self.assertTrue(result[1] == fn.__code__.co_linetable)
def test_if_tensor_is_none(self):
"""
Python 3.11 adds new jump instructions that check if
TOS is None. We do not support these instructions.
"""
def f(x, y):
z = 1
if x is None:
z *= 2
if y is not None:
z *= 3
return z
opt_f = torch.compile(f, backend="eager", fullgraph=True)
self.assertEqual(opt_f(None, torch.ones(2)), 6)
if sys.version_info >= (3, 11):
insts = bytecode_transformation.cleaned_instructions(f.__code__)
for inst in insts:
self.assertNotIn("_NONE", inst.opname)
@skipIfNotPy311
def test_py311_jump_offset(self):
new_inst = bytecode_transformation.create_instruction
consts = (None, 1, 2, 3, 4)
def create_test_code(jump_opname, target_idx):
targets = [
new_inst("LOAD_CONST", argval=1),
new_inst("LOAD_CONST", argval=3),
]
jump_to_target_inst = new_inst(jump_opname, target=targets[target_idx])
"""
pseudocode of generated bytecode:
def test_py311_fn():
goto target1
target0:
return 1
target1:
goto [target0/target2] (via fwd or bwd jump)
return 2
target2:
return 3
return 4
"""
# test with LOAD_GLOBAL since it has a different instruction size
insts = [
new_inst("RESUME", arg=0),
new_inst("JUMP_FORWARD", target=jump_to_target_inst),
targets[0],
new_inst("LOAD_GLOBAL", arg=0, argval="print"),
new_inst("POP_TOP"),
new_inst("RETURN_VALUE"),
jump_to_target_inst,
new_inst("LOAD_CONST", argval=2),
new_inst("LOAD_GLOBAL", arg=0, argval="print"),
new_inst("POP_TOP"),
new_inst("RETURN_VALUE"),
targets[1],
new_inst("RETURN_VALUE"),
new_inst("LOAD_CONST", argval=4),
new_inst("RETURN_VALUE"),
]
code_options = collections.OrderedDict(
[
("co_argcount", 0),
("co_posonlyargcount", 0),
("co_kwonlyargcount", 0),
("co_nlocals", 0),
("co_stacksize", 2),
("co_flags", 3),
("co_code", b""),
("co_consts", consts),
("co_names", ("print",)),
("co_varnames", ()),
("co_filename", __file__),
("co_name", "test_py311_fn"),
("co_qualname", "test_py311_fn"),
("co_firstlineno", 1),
("co_linetable", b""),
("co_exceptiontable", b""),
("co_freevars", ()),
("co_cellvars", ()),
]
)
return bytecode_transformation.clean_and_assemble_instructions(
insts,
list(code_options.keys()),
code_options,
)
# format: jump_opname, target_idx, expected forward jump, expected return value
test_args = (
("JUMP_FORWARD", 0, False, 1),
("JUMP_FORWARD", 1, True, 3),
("JUMP_BACKWARD", 0, False, 1),
("JUMP_BACKWARD", 1, True, 3),
)
for test in test_args:
insts, code = create_test_code(test[0], test[1])
# check if offset of latest jump instruction is forward/backward
for inst in reversed(insts):
if inst.opname.startswith("JUMP"):
if test[2]:
self.assertIn("FORWARD", inst.opname)
else:
self.assertIn("BACKWARD", inst.opname)
break
# run the code and check result
def dummy_fn():
pass
dummy_fn.__code__ = code
self.assertEqual(dummy_fn(), test[3])
dummy_opt = torch.compile(dummy_fn, backend="eager")
self.assertEqual(dummy_opt(), test[3])
def test_exception_table_encode_varint(self):
# these numbers have no real meaning to them
nums = [
0b111_101010_000000,
0b1100_111000_010101_101010,
]
b = bytecode_transformation.encode_exception_table_varint(
nums[0]
) + bytecode_transformation.encode_exception_table_varint(nums[1])
nums_new = []
b_iter = iter(bytes(b))
while True:
try:
nums_new.append(
bytecode_transformation.decode_exception_table_varint(b_iter)
)
except StopIteration:
break
self.assertEqual(nums, nums_new)
@skipIfNotPy311
def test_exception_table_parsing(self):
def fn():
try:
with a():
b()
c()
except Exception:
d()
finally:
e()
f()
tab = bytecode_transformation.parse_exception_table(
fn.__code__.co_exceptiontable
)
b = bytecode_transformation.assemble_exception_table(tab)
self.assertEqual(b, fn.__code__.co_exceptiontable)
@skipIfNotPy311
def test_exception_table_e2e(self):
def fn():
try:
with a():
b()
c()
except Exception:
d()
finally:
e()
f()
def nothing(*args):
pass
code, _ = bytecode_transformation.transform_code_object(fn.__code__, nothing)
self.assertEqual(code.co_exceptiontable, fn.__code__.co_exceptiontable)
@skipIfNotPy311
def test_exception_table_e2e_2(self):
# last instructions of an exn_table entry is a large instruction
# i.e., LOAD_GLOBAL a
def fn():
try:
return a
except Exception:
pass
def nothing(*args):
pass
code, _ = bytecode_transformation.transform_code_object(fn.__code__, nothing)
self.assertEqual(code.co_exceptiontable, fn.__code__.co_exceptiontable)
@skipIfNotPy311
def test_exception_table_entry_propagation(self):
insts = []
for _ in range(10):
insts.append(bytecode_transformation.create_instruction("NOP"))
insts[8].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[0], insts[9], insts[0], 0, True
)
insts[0].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[0], insts[0], insts[1], 0, True
)
insts[1].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[0], insts[2], insts[2], 0, True
)
insts[5].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[4], insts[6], insts[3], 0, True
)
insts[9].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[9], insts[9], insts[4], 0, True
)
insts[7].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[7], insts[9], insts[5], 0, True
)
bytecode_transformation.propagate_inst_exn_table_entries(insts)
expected = [1, 2, 2, 0, 3, 3, 3, 5, 5, 4]
for inst, exp in zip(insts, expected):
self.assertIsNotNone(inst.exn_tab_entry)
self.assertIs(inst.exn_tab_entry.target, insts[exp])
@skipIfNotPy311
def test_compute_exception_table_nested(self):
insts = []
for _ in range(20):
insts.append(bytecode_transformation.create_instruction("NOP"))
insts[10].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[1], insts[10], insts[0], 0, True
)
insts[0].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[1], insts[1], insts[1], 0, True
)
insts[1].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[1], insts[3], insts[2], 0, True
)
insts[5].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[5], insts[7], insts[3], 0, True
)
insts[9].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[10], insts[10], insts[4], 0, True
)
insts[7].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[8], insts[10], insts[5], 0, True
)
insts[14].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[13], insts[17], insts[6], 0, True
)
insts[16].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[15], insts[16], insts[7], 0, True
)
bytecode_transformation.update_offsets(insts)
tab = bytecode_transformation.compute_exception_table(insts)
expected = [
(1, 1, 1),
(2, 3, 2),
(4, 4, 0),
(5, 7, 3),
(8, 9, 5),
(10, 10, 4),
(13, 14, 6),
(15, 16, 7),
(17, 17, 6),
]
self.assertEqual(len(tab), len(expected))
for entry, exp in zip(tab, expected):
self.assertEqual(entry.start, exp[0] * 2)
self.assertEqual(entry.end, exp[1] * 2)
self.assertEqual(entry.target, exp[2] * 2)
@skipIfNotPy311
def test_remove_dead_code_with_exn_table_entries(self):
create_instruction = bytecode_transformation.create_instruction
target1 = create_instruction("NOP")
target2 = create_instruction("NOP")
target3 = create_instruction("NOP")
exn_start = create_instruction("NOP")
exn_end = create_instruction("NOP")
insts = [
create_instruction("JUMP_FORWARD", target=target1),
exn_start, # dead
target1,
create_instruction("JUMP_FORWARD", target=target3),
exn_end, # dead
target2,
target3,
]
exn_start.exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
exn_start, exn_end, target2, 0, True
)
bytecode_transformation.propagate_inst_exn_table_entries(insts)
insts = bytecode_analysis.remove_dead_code(insts)
self.assertEqual(len(insts), 5)
self.assertNotIn(exn_start, insts)
self.assertNotIn(exn_end, insts)
self.assertIn(target2, insts)
self.assertIn(target3, insts)
bytecode_transformation.update_offsets(insts)
tab = bytecode_transformation.compute_exception_table(insts)
self.assertEqual(len(tab), 1)
self.assertEqual(tab[0].start, 2)
self.assertEqual(tab[0].end, 4)
self.assertEqual(tab[0].target, 6)
def test_bytecode_from_template(self):
def fn(d1):
for k, v in d1.items():
d2[k] = v
varname_map = {"d1": "var1", "d2": "var2", "k": "var3", "v": "var4"}
insts = bytecode_transformation.bytecode_from_template(fn, varname_map)
for inst in insts:
self.assertIsNone(inst.starts_line)
if inst.opname.startswith("LOAD"):
self.assertNotIn(inst.argval, varname_map)
if inst.opname not in ("LOAD_GLOBAL", "LOAD_ATTR"):
self.assertIsNone(inst.arg)
self.assertFalse(inst.opname.startswith("RETURN"))
@skipIfNotPy311
def test_bytecode_from_template_noprefix(self):
# Test that 3.11+ prefix instructions are removed
def gen_fn():
cl = None
def fn():
return cl
return fn
fn = gen_fn()
dis_insts = list(dis.get_instructions(fn))
names = {inst.opname for inst in dis_insts}
self.assertIn("RESUME", names)
self.assertIn("COPY_FREE_VARS", names)
insts = bytecode_transformation.bytecode_from_template(fn)
names = {inst.opname for inst in insts}
self.assertNotIn("RESUME", names)
self.assertNotIn("COPY_FREE_VARS", names)
def test_bytecode_from_template_noreturn1(self):
# Test that functions with multiple returns will have their
# returns replaced with jumps to the end
def fn():
if x:
return y
z = 3
return z
dis_insts = list(dis.get_instructions(fn))
dis_returns = list(filter(lambda x: x.opname.startswith("RETURN"), dis_insts))
self.assertGreater(len(dis_returns), 1)
self.assertTrue(dis_insts[-1].opname.startswith("RETURN"))
insts = bytecode_transformation.bytecode_from_template(fn, noprefix=False)
self.assertEqual(insts[-1].opname, "NOP")
self.assertEqual(len(dis_insts), len(insts))
for i0, i1 in zip(dis_insts, insts):
if i0.opname.startswith("RETURN"):
if i1 is insts[-1]:
continue
self.assertIn("JUMP", i1.opname)
self.assertIs(i1.target, insts[-1])
# Should work with 3.10, but testing with 3.11+ is sufficient.
# In 3.8, `fn` ends with a RETURN_VALUE.
@skipIfNotPy311
def test_bytecode_from_template_noreturn2(self):
# Test function that doesn't end with RETURN_VALUE
def fn():
if x:
return x
if x:
return x
raise RuntimeError
dis_insts = list(dis.get_instructions(fn))
self.assertFalse(dis_insts[-1].opname.startswith("RETURN"))
insts = bytecode_transformation.bytecode_from_template(fn, noprefix=False)
self.assertEqual(insts[-1].opname, "NOP")
self.assertEqual(insts[-2].opname, dis_insts[-1].opname)
self.assertEqual(len(dis_insts) + 1, len(insts))
for i0, i1 in zip(dis_insts, insts):
if i0.opname.startswith("RETURN"):
self.assertIn("JUMP", i1.opname)
self.assertIs(i1.target, insts[-1])
@unittest.skipIf(sys.version_info >= (3, 14), "3.14+ removed RETURN_CONST")
@skipIfNotPy312
def test_bytecode_from_template_noreturn_const(self):
# Test 3.12+ RETURN_CONST
def fn():
if x:
return 1
return 0
dis_insts = list(dis.get_instructions(fn))
dis_return_consts = list(
filter(lambda x: x.opname == "RETURN_CONST", dis_insts)
)
self.assertGreater(len(dis_return_consts), 1)
self.assertTrue(dis_insts[-1].opname == "RETURN_CONST")
insts = bytecode_transformation.bytecode_from_template(fn, noprefix=False)
self.assertEqual(insts[-1].opname, "NOP")
insts_i = 0
for inst in dis_insts:
if inst.opname == "RETURN_CONST":
self.assertEqual(insts[insts_i].opname, "LOAD_CONST")
insts_i += 1
if insts_i != len(insts) - 1:
self.assertIn("JUMP", insts[insts_i].opname)
self.assertIs(insts[insts_i].target, insts[-1])
insts_i += 1
def test_bytecode_analysis_jump_backward_no_interrupt(self):
# bytecode_analysis fails if JUMP_BACKWARD_NO_INTERRUPT is not terminal in 3.13+
@torch.compile(backend="eager")
def fn(x):
# graph break causes bytecode_analysis to analyze the rest of this function
torch._dynamo.graph_break()
with torch.no_grad():
try:
x = x + 1
except NotImplementedError:
x = x + 1
except Exception:
x = x + 1
return x
self.assertEqual(fn(torch.ones(3)), torch.ones(3) + 1)
# https://github.com/pytorch/pytorch/issues/160471
def test_extended_args_starts_line(self):
# NOTE: need to LOAD_CONST i before LOAD_FAST x
# in order to get an EXTENDED_ARG with starts_line set
# NOTE: 3.14+ introduced LOAD_SMALL_INT, so integers need to be >= 256
# in order for LOAD_CONST to be generated
lines = "\n".join(f" x = {i + 1000} + x" for i in range(300))
fn_str = f"def fn(x):\n{lines}"
locals = {}
exec(fn_str, {}, locals)
fn = locals["fn"]
for inst in dis.get_instructions(fn):
if inst.opname == "EXTENDED_ARG" and inst.starts_line:
break
else:
self.assertTrue(
False, "bad test case: no EXTENDED_ARG with starts_line found"
)
def transformations(instructions, _):
for inst in instructions:
if inst.starts_line == 301:
break
else:
self.assertTrue(False, "test failure: 301 starts_line not found")
return instructions
bytecode_transformation.transform_code_object(fn.__code__, transformations)
| BytecodeTests |
python | django__django | tests/admin_views/admin.py | {
"start": 29773,
"end": 29857
} | class ____(admin.ModelAdmin):
readonly_fields = ("plotdetails",)
| PlotReadonlyAdmin |
python | django__django | tests/custom_managers/models.py | {
"start": 4216,
"end": 5014
} | class ____(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField(default=False)
authors = models.ManyToManyField(Person, related_name="books")
fun_authors = models.ManyToManyField(FunPerson, related_name="books")
favorite_things = GenericRelation(
Person,
content_type_field="favorite_thing_type",
object_id_field="favorite_thing_id",
)
fun_people_favorite_things = GenericRelation(
FunPerson,
content_type_field="favorite_thing_type",
object_id_field="favorite_thing_id",
)
published_objects = PublishedBookManager()
annotated_objects = AnnotatedBookManager()
class Meta:
base_manager_name = "annotated_objects"
| Book |
python | streamlit__streamlit | lib/streamlit/runtime/caching/storage/cache_storage_protocol.py | {
"start": 2551,
"end": 2643
} | class ____(Exception):
"""Base exception raised by the cache storage."""
| CacheStorageError |
python | tiangolo__fastapi | scripts/sponsors.py | {
"start": 1220,
"end": 1309
} | class ____(BaseModel):
edges: list[SponsorshipAsMaintainerEdge]
| SponsorshipAsMaintainer |
python | run-llama__llama_index | llama-index-experimental/llama_index/experimental/param_tuner/base.py | {
"start": 470,
"end": 1529
} | class ____(BaseModel):
run_results: List[RunResult]
best_idx: int
@property
def best_run_result(self) -> RunResult:
"""Get best run result."""
return self.run_results[self.best_idx]
def generate_param_combinations(param_dict: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Generate parameter combinations."""
def _generate_param_combinations_helper(
param_dict: Dict[str, Any], curr_param_dict: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Helper function."""
if len(param_dict) == 0:
return [deepcopy(curr_param_dict)]
param_dict = deepcopy(param_dict)
param_name, param_vals = param_dict.popitem()
param_combinations = []
for param_val in param_vals:
curr_param_dict[param_name] = param_val
param_combinations.extend(
_generate_param_combinations_helper(param_dict, curr_param_dict)
)
return param_combinations
return _generate_param_combinations_helper(param_dict, {})
| TunedResult |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/sagemaker.py | {
"start": 11563,
"end": 12655
} | class ____(SageMakerBaseSensor):
"""
Poll the auto ML job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerAutoMLSensor`
:param job_name: unique name of the AutoML job to watch.
"""
template_fields: Sequence[str] = aws_template_fields(
"job_name",
)
def __init__(self, *, job_name: str, **kwargs):
super().__init__(resource_type="autoML job", **kwargs)
self.job_name = job_name
def non_terminal_states(self) -> set[str]:
return SageMakerHook.non_terminal_states
def failed_states(self) -> set[str]:
return SageMakerHook.failed_states
def get_sagemaker_response(self) -> dict:
self.log.info("Poking Sagemaker AutoML Execution %s", self.job_name)
return self.hook._describe_auto_ml_job(self.job_name)
def state_from_response(self, response: dict) -> str:
return response["AutoMLJobStatus"]
| SageMakerAutoMLSensor |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 6151,
"end": 6256
} | class ____(ServerConnectionError, asyncio.TimeoutError):
"""Server timeout error."""
| ServerTimeoutError |
python | wandb__wandb | wandb/vendor/pygments/lexers/pascal.py | {
"start": 26923,
"end": 32646
} | class ____(RegexLexer):
"""
For Ada source code.
.. versionadded:: 1.3
"""
name = 'Ada'
aliases = ['ada', 'ada95', 'ada2005']
filenames = ['*.adb', '*.ads', '*.ada']
mimetypes = ['text/x-ada']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'--.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
(r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
(r'(subtype|type)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
(r'task|protected', Keyword.Declaration),
(r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
(r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
(r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text,
Comment.Preproc)),
(r'(true|false|null)\b', Keyword.Constant),
(words((
'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count',
'Cursor', 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator',
'Integer', 'Long_Float', 'Long_Integer', 'Long_Long_Float',
'Long_Long_Integer', 'Natural', 'Positive', 'Reference_Type',
'Short_Float', 'Short_Integer', 'Short_Short_Float',
'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String'),
suffix=r'\b'),
Keyword.Type),
(r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word),
(r'generic|private', Keyword.Declaration),
(r'package', Keyword.Declaration, 'package'),
(r'array\b', Keyword.Reserved, 'array_def'),
(r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(\w+)(\s*)(:)(\s*)(constant)',
bygroups(Name.Constant, Text, Punctuation, Text,
Keyword.Reserved)),
(r'<<\w+>>', Name.Label),
(r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
(words((
'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all',
'array', 'at', 'begin', 'body', 'case', 'constant', 'declare',
'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry',
'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is', 'limited',
'loop', 'new', 'null', 'of', 'or', 'others', 'out', 'overriding',
'pragma', 'protected', 'raise', 'range', 'record', 'renames', 'requeue',
'return', 'reverse', 'select', 'separate', 'subtype', 'synchronized',
'task', 'tagged', 'terminate', 'then', 'type', 'until', 'when',
'while', 'xor'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(r'"[^"]*"', String),
include('attribute'),
include('numbers'),
(r"'[^']'", String.Character),
(r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
(r"(<>|=>|:=|[()|:;,.'])", Punctuation),
(r'[*<>+=/&-]', Operator),
(r'\n+', Text),
],
'numbers': [
(r'[0-9_]+#[0-9a-f]+#', Number.Hex),
(r'[0-9_]+\.[0-9_]*', Number.Float),
(r'[0-9_]+', Number.Integer),
],
'attribute': [
(r"(')(\w+)", bygroups(Punctuation, Name.Attribute)),
],
'subprogram': [
(r'\(', Punctuation, ('#pop', 'formal_part')),
(r';', Punctuation, '#pop'),
(r'is\b', Keyword.Reserved, '#pop'),
(r'"[^"]+"|\w+', Name.Function),
include('root'),
],
'end': [
('(if|case|record|loop|select)', Keyword.Reserved),
('"[^"]+"|[\w.]+', Name.Function),
('\s+', Text),
(';', Punctuation, '#pop'),
],
'type_def': [
(r';', Punctuation, '#pop'),
(r'\(', Punctuation, 'formal_part'),
(r'with|and|use', Keyword.Reserved),
(r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
(r'record\b', Keyword.Reserved, ('record_def')),
(r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'),
include('root'),
],
'array_def': [
(r';', Punctuation, '#pop'),
(r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)),
include('root'),
],
'record_def': [
(r'end record', Keyword.Reserved, '#pop'),
include('root'),
],
'import': [
(r'[\w.]+', Name.Namespace, '#pop'),
default('#pop'),
],
'formal_part': [
(r'\)', Punctuation, '#pop'),
(r'\w+', Name.Variable),
(r',|:[^=]', Punctuation),
(r'(in|not|null|out|access)\b', Keyword.Reserved),
include('root'),
],
'package': [
('body', Keyword.Declaration),
('is\s+new|renames', Keyword.Reserved),
('is', Keyword.Reserved, '#pop'),
(';', Punctuation, '#pop'),
('\(', Punctuation, 'package_instantiation'),
('([\w.]+)', Name.Class),
include('root'),
],
'package_instantiation': [
(r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)),
(r'[\w.\'"]', Text),
(r'\)', Punctuation, '#pop'),
include('root'),
],
}
| AdaLexer |
python | bokeh__bokeh | src/bokeh/resources.py | {
"start": 7477,
"end": 7590
} | class ____(Protocol):
@staticmethod
def __call__(components: list[str], kind: Kind) -> list[str]: ...
| UrlsFn |
python | getsentry__sentry | tests/sentry/workflow_engine/migration_helpers/test_migrate_alert_rule.py | {
"start": 21465,
"end": 27595
} | class ____(BaseMetricAlertMigrationTest):
def setUp(self) -> None:
self.metric_alert = self.create_alert_rule()
(
self.data_source,
self.detector_data_condition_group,
self.workflow,
self.detector,
self.detector_state,
self.alert_rule_detector,
self.alert_rule_workflow,
self.detector_workflow,
self.data_source_detector,
) = self.create_migrated_metric_alert_objects(self.metric_alert)
# we need to set up the resolve condition here, because the dual delete helper expects it
# its content doesn't matter, it just needs to exist
self.resolve_detector_trigger = self.create_migrated_metric_alert_rule_resolve_objects(
self.metric_alert, 67, Condition.LESS_OR_EQUAL
)
def test_dual_delete_metric_alert_workflow(self) -> None:
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
# check workflow-related tables
assert not Workflow.objects.filter(id=self.workflow.id).exists()
assert not AlertRuleWorkflow.objects.filter(id=self.alert_rule_workflow.id).exists()
def test_dual_delete_metric_alert_detector(self) -> None:
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
# check detector-related tables
assert not Detector.objects.filter(id=self.detector.id).exists()
assert not AlertRuleDetector.objects.filter(id=self.alert_rule_detector.id).exists()
assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
assert not DetectorState.objects.filter(id=self.detector_state.id).exists()
assert not DataSourceDetector.objects.filter(id=self.data_source_detector.id).exists()
assert not DataConditionGroup.objects.filter(
id=self.detector_data_condition_group.id
).exists()
def test_dual_delete_metric_alert_data_source(self) -> None:
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
# check data source
assert not DataSource.objects.filter(id=self.data_source.id).exists()
def test_dual_delete_comprehensive(self) -> None:
"""
If we dual delete an alert rule, the associated ACI objects for its triggers and trigger actions
also need to be deleted.
"""
alert_rule_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="critical", alert_threshold=200
)
alert_rule_trigger_action = self.create_alert_rule_trigger_action(
alert_rule_trigger=alert_rule_trigger
)
detector_trigger, action_filter, resolve_action_filter = (
self.create_migrated_metric_alert_rule_trigger_objects(
alert_rule_trigger, DetectorPriorityLevel.HIGH, Condition.GREATER
)
)
action_filter_dcg = action_filter.condition_group
action, data_condition_group_action, aarta = (
self.create_migrated_metric_alert_rule_action_objects(alert_rule_trigger_action)
)
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
# check trigger action objects
assert not Action.objects.filter(id=action.id).exists()
assert not DataConditionGroupAction.objects.filter(
id=data_condition_group_action.id
).exists()
assert not ActionAlertRuleTriggerAction.objects.filter(id=aarta.id).exists()
# check resolution objects
assert not DataCondition.objects.filter(id=self.resolve_detector_trigger.id).exists()
# check trigger objects
assert not DataConditionGroup.objects.filter(id=action_filter_dcg.id).exists()
assert not DataCondition.objects.filter(id=detector_trigger.id).exists()
assert not DataCondition.objects.filter(id=resolve_action_filter.id).exists()
assert not DataConditionAlertRuleTrigger.objects.filter(
data_condition=detector_trigger
).exists()
assert not DataCondition.objects.filter(id=action_filter.id).exists()
@mock.patch("sentry.workflow_engine.migration_helpers.alert_rule.logger")
def test_dual_delete_twice(self, mock_logger: mock.MagicMock) -> None:
"""
Test that nothing happens if dual delete is run twice. We should just quit early the
second time.
"""
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
mock_logger.info.assert_called_with(
"alert rule was not dual written or objects were already deleted, returning early",
extra={"alert_rule_id": self.metric_alert.id},
)
def test_dual_delete_twice_before_running_scheduled_deletions(self) -> None:
"""
Test that nothing happens if dual delete is run twice (before scheduled deletions
are run).
"""
dual_delete_migrated_alert_rule(self.metric_alert)
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
def test_dual_delete_missing_workflow(self) -> None:
"""
Test that if we are missing the Workflow and AlertRuleWorkflow models that we still delete the detector
"""
self.workflow.delete()
self.alert_rule_workflow.delete()
dual_delete_migrated_alert_rule(self.metric_alert)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
| DualDeleteAlertRuleTest |
python | sphinx-doc__sphinx | sphinx/ext/doctest.py | {
"start": 9898,
"end": 24602
} | class ____(Builder):
"""Runs test snippets in the documentation."""
name = 'doctest'
epilog = __(
'Testing of doctests in the sources finished, look at the '
'results in %(outdir)s/output.txt.'
)
def init(self) -> None:
# default options
self.opt = self.config.doctest_default_flags
# HACK HACK HACK
# doctest compiles its snippets with type 'single'. That is nice
# for doctest examples but unusable for multi-statement code such
# as setup code -- to be able to use doctest error reporting with
# that code nevertheless, we monkey-patch the "compile" it uses.
doctest.compile = self.compile # type: ignore[attr-defined]
sys.path[0:0] = self.config.doctest_path
self.type = 'single'
self.total_failures = 0
self.total_tries = 0
self.setup_failures = 0
self.setup_tries = 0
self.cleanup_failures = 0
self.cleanup_tries = 0
date = time.strftime('%Y-%m-%d %H:%M:%S')
outpath = self.outdir.joinpath('output.txt')
self.outfile = outpath.open('w', encoding='utf-8')
line = '=' * len(date)
self.outfile.write(
f'Results of doctest builder run on {date}\n'
f'=================================={line}\n'
)
def __del__(self) -> None:
# free resources upon destruction (the file handler might not be
# closed if the builder is never used)
if hasattr(self, 'outfile'):
self.outfile.close()
def _out(self, text: str) -> None:
logger.info(text, nonl=True)
self.outfile.write(text)
def _warn_out(self, text: str) -> None:
if self.config.verbosity < 0:
logger.warning(text)
else:
logger.info(text, nonl=True)
self.outfile.write(text)
def get_target_uri(self, docname: str, typ: str | None = None) -> str:
return ''
def get_outdated_docs(self) -> set[str]:
return self.env.found_docs
def finish(self) -> None:
# write executive summary
def s(v: int) -> str:
return 's' if v != 1 else ''
header = 'Doctest summary'
if self.total_failures or self.setup_failures or self.cleanup_failures:
self._app.statuscode = 1
if self.config.doctest_fail_fast:
header = f'{header} (exiting after first failed test)'
underline = '=' * len(header)
self._out(
f"""
{header}
{underline}
{self.total_tries:5} test{s(self.total_tries)}
{self.total_failures:5} failure{s(self.total_failures)} in tests
{self.setup_failures:5} failure{s(self.setup_failures)} in setup code
{self.cleanup_failures:5} failure{s(self.cleanup_failures)} in cleanup code
"""
)
self.outfile.close()
def write_documents(self, docnames: Set[str]) -> None:
logger.info(bold('running tests...'))
for docname in sorted(docnames):
# no need to resolve the doctree
doctree = self.env.get_doctree(docname)
success = self.test_doc(docname, doctree)
if not success and self.config.doctest_fail_fast:
break
def get_filename_for_node(self, node: Node, docname: str) -> str:
"""Try to get the file which actually contains the doctest, not the
filename of the document it's included in.
"""
try:
filename = relpath(node.source, self.env.srcdir) # type: ignore[arg-type]
return filename.partition(':docstring of ')[0]
except Exception:
return str(self.env.doc2path(docname, False))
@staticmethod
def get_line_number(node: Node) -> int | None:
"""Get the real line number or admit we don't know."""
# TODO: Work out how to store or calculate real (file-relative)
# line numbers for doctest blocks in docstrings.
if ':docstring of ' in os.path.basename(node.source or ''):
# The line number is given relative to the stripped docstring,
# not the file. This is correct where it is set, in
# `docutils.nodes.Node.setup_child`, but Sphinx should report
# relative to the file, not the docstring.
return None
if node.line is not None:
# TODO: find the root cause of this off by one error.
return node.line - 1
return None
def skipped(self, node: Element) -> bool:
if 'skipif' not in node:
return False
else:
condition = node['skipif']
context: dict[str, Any] = {}
if self.config.doctest_global_setup:
exec(self.config.doctest_global_setup, context) # NoQA: S102
should_skip = eval(condition, context) # NoQA: S307
if self.config.doctest_global_cleanup:
exec(self.config.doctest_global_cleanup, context) # NoQA: S102
return should_skip
def test_doc(self, docname: str, doctree: Node) -> bool:
groups: dict[str, TestGroup] = {}
add_to_all_groups = []
self.setup_runner = SphinxDocTestRunner(verbose=False, optionflags=self.opt)
self.test_runner = SphinxDocTestRunner(verbose=False, optionflags=self.opt)
self.cleanup_runner = SphinxDocTestRunner(verbose=False, optionflags=self.opt)
self.test_runner._fakeout = self.setup_runner._fakeout # type: ignore[attr-defined]
self.cleanup_runner._fakeout = self.setup_runner._fakeout # type: ignore[attr-defined]
if self.config.doctest_test_doctest_blocks:
condition = _condition_with_doctest
else:
condition = _condition_default
for node in doctree.findall(condition):
if self.skipped(node): # type: ignore[arg-type]
continue
source = node['test'] if 'test' in node else node.astext() # type: ignore[index, operator]
filename = self.get_filename_for_node(node, docname)
line_number = self.get_line_number(node)
if not source:
logger.warning(
__('no code/output in %s block at %s:%s'),
node.get('testnodetype', 'doctest'), # type: ignore[attr-defined]
filename,
line_number,
)
code = TestCode(
source,
type=node.get('testnodetype', 'doctest'), # type: ignore[attr-defined]
filename=filename,
lineno=line_number, # type: ignore[arg-type]
options=node.get('options'), # type: ignore[attr-defined]
)
node_groups = node.get('groups', [self.config.doctest_test_doctest_blocks]) # type: ignore[attr-defined]
if '*' in node_groups:
add_to_all_groups.append(code)
continue
for groupname in node_groups:
if groupname not in groups:
groups[groupname] = TestGroup(groupname)
groups[groupname].add_code(code)
for code in add_to_all_groups:
for group in groups.values():
group.add_code(code)
if self.config.doctest_global_setup:
code = TestCode(
self.config.doctest_global_setup,
'testsetup',
filename='<global_setup>',
lineno=0,
)
for group in groups.values():
group.add_code(code, prepend=True)
if self.config.doctest_global_cleanup:
code = TestCode(
self.config.doctest_global_cleanup,
'testcleanup',
filename='<global_cleanup>',
lineno=0,
)
for group in groups.values():
group.add_code(code)
if not groups:
return True
show_successes = self.config.doctest_show_successes
if show_successes:
self._out(f'\nDocument: {docname}\n----------{"-" * len(docname)}\n')
success = True
for group in groups.values():
if not self.test_group(group):
success = False
if self.config.doctest_fail_fast:
break
# Separately count results from setup code
res_f, res_t = self.setup_runner.summarize(self._out, verbose=False)
self.setup_failures += res_f
self.setup_tries += res_t
if self.test_runner.tries:
res_f, res_t = self.test_runner.summarize(self._out, verbose=show_successes)
self.total_failures += res_f
self.total_tries += res_t
if self.cleanup_runner.tries:
res_f, res_t = self.cleanup_runner.summarize(
self._out, verbose=show_successes
)
self.cleanup_failures += res_f
self.cleanup_tries += res_t
return success
def compile(
self, code: str, name: str, type: str, flags: Any, dont_inherit: bool
) -> Any:
return compile(code, name, self.type, flags, dont_inherit)
def test_group(self, group: TestGroup) -> bool:
ns: dict[str, Any] = {}
def run_setup_cleanup(
runner: Any, testcodes: list[TestCode], what: Any
) -> bool:
examples = []
for testcode in testcodes:
example = doctest.Example(testcode.code, '', lineno=testcode.lineno)
examples.append(example)
if not examples:
return True
# simulate a doctest with the code
sim_doctest = doctest.DocTest(
examples,
{},
f'{group.name} ({what} code)',
testcodes[0].filename,
0,
None,
)
sim_doctest.globs = ns
old_f = runner.failures
self.type = 'exec' # the snippet may contain multiple statements
runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
return runner.failures <= old_f
# run the setup code
if not run_setup_cleanup(self.setup_runner, group.setup, 'setup'):
# if setup failed, don't run the group
return False
# run the tests
success = True
for code in group.tests:
if len(code) == 1:
# ordinary doctests (code/output interleaved)
try:
test = parser.get_doctest(
code[0].code, {}, group.name, code[0].filename, code[0].lineno
)
except Exception:
logger.warning(
__('ignoring invalid doctest code: %r'),
code[0].code,
location=(code[0].filename, code[0].lineno),
)
continue
if not test.examples:
continue
for example in test.examples:
# apply directive's comparison options
new_opt = code[0].options.copy()
new_opt.update(example.options)
example.options = new_opt
self.type = 'single' # as for ordinary doctests
else:
# testcode and output separate
output = code[1].code if code[1] else ''
options = code[1].options if code[1] else {}
# disable <BLANKLINE> processing as it is not needed
options[doctest.DONT_ACCEPT_BLANKLINE] = True
# find out if we're testing an exception
m = parser._EXCEPTION_RE.match(output) # type: ignore[attr-defined]
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
example = doctest.Example(
code[0].code,
output,
exc_msg=exc_msg,
lineno=code[0].lineno,
options=options,
)
test = doctest.DocTest(
[example],
{},
group.name,
code[0].filename,
code[0].lineno,
None,
)
self.type = 'exec' # multiple statements again
# DocTest.__init__ copies the globs namespace, which we don't want
test.globs = ns
old_f = self.test_runner.failures
# also don't clear the globs namespace after running the doctest
self.test_runner.run(test, out=self._warn_out, clear_globs=False)
if self.test_runner.failures > old_f:
success = False
if self.config.doctest_fail_fast:
break
# run the cleanup
if not run_setup_cleanup(self.cleanup_runner, group.cleanup, 'cleanup'):
return False
return success
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_directive('testsetup', TestsetupDirective)
app.add_directive('testcleanup', TestcleanupDirective)
app.add_directive('doctest', DoctestDirective)
app.add_directive('testcode', TestcodeDirective)
app.add_directive('testoutput', TestoutputDirective)
app.add_builder(DocTestBuilder)
# this config value adds to sys.path
app.add_config_value('doctest_show_successes', True, '', types=frozenset({bool}))
app.add_config_value('doctest_path', (), '', types=frozenset({list, tuple}))
app.add_config_value(
'doctest_test_doctest_blocks', 'default', '', types=frozenset({str})
)
app.add_config_value('doctest_global_setup', '', '', types=frozenset({str}))
app.add_config_value('doctest_global_cleanup', '', '', types=frozenset({str}))
app.add_config_value(
'doctest_default_flags',
doctest.DONT_ACCEPT_TRUE_FOR_1
| doctest.ELLIPSIS
| doctest.IGNORE_EXCEPTION_DETAIL,
'',
types=frozenset({int}),
)
app.add_config_value('doctest_fail_fast', False, '', types=frozenset({bool}))
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
def _condition_default(node: Node) -> bool:
return (
isinstance(node, (nodes.literal_block, nodes.comment))
and 'testnodetype' in node
)
def _condition_with_doctest(node: Node) -> bool:
return _condition_default(node) or isinstance(node, nodes.doctest_block)
| DocTestBuilder |
python | spack__spack | lib/spack/spack/database.py | {
"start": 72819,
"end": 72929
} | class ____(SpackError):
"""Raised when attempting to add non-concrete spec to DB."""
| NonConcreteSpecAddError |
python | getsentry__sentry | src/flagpole/conditions.py | {
"start": 806,
"end": 1311
} | class ____(Exception):
pass
def get_type_name(value: Any):
return type(value).__name__
T = TypeVar("T", str, int, float)
def create_case_insensitive_set_from_list(values: list[T]) -> set[T]:
case_insensitive_set = set()
for value in values:
if isinstance(value, str):
case_insensitive_set.add(value.lower())
else:
case_insensitive_set.add(value)
return case_insensitive_set
@dataclasses.dataclass(frozen=True)
| ConditionTypeMismatchException |
python | networkx__networkx | networkx/algorithms/tests/test_lowest_common_ancestors.py | {
"start": 4930,
"end": 5852
} | class ____(TestTreeLCA):
@classmethod
def setup_class(cls):
cls.DG = nx.MultiDiGraph()
edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
cls.DG.add_edges_from(edges)
cls.ans = dict(tree_all_pairs_lca(cls.DG, 0))
# add multiedges
cls.DG.add_edges_from(edges)
gold = {(n, n): n for n in cls.DG}
gold.update({(0, i): 0 for i in range(1, 7)})
gold.update(
{
(1, 2): 0,
(1, 3): 1,
(1, 4): 1,
(1, 5): 0,
(1, 6): 0,
(2, 3): 0,
(2, 4): 0,
(2, 5): 2,
(2, 6): 2,
(3, 4): 1,
(3, 5): 0,
(3, 6): 0,
(4, 5): 0,
(4, 6): 0,
(5, 6): 2,
}
)
cls.gold = gold
| TestMultiTreeLCA |
python | networkx__networkx | networkx/algorithms/flow/tests/test_gomory_hu.py | {
"start": 336,
"end": 4471
} | class ____:
def minimum_edge_weight(self, T, u, v):
path = nx.shortest_path(T, u, v, weight="weight")
return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:]))
def compute_cutset(self, G, T_orig, edge):
T = T_orig.copy()
T.remove_edge(*edge)
U, V = list(nx.connected_components(T))
cutset = set()
for x, nbrs in ((n, G[n]) for n in U):
cutset.update((x, y) for y in nbrs if y in V)
return cutset
def test_default_flow_function_karate_club_graph(self):
G = nx.karate_club_graph()
nx.set_edge_attributes(G, 1, "capacity")
T = nx.gomory_hu_tree(G)
assert nx.is_tree(T)
for u, v in combinations(G, 2):
cut_value, edge = self.minimum_edge_weight(T, u, v)
assert nx.minimum_cut_value(G, u, v) == cut_value
def test_karate_club_graph(self):
G = nx.karate_club_graph()
nx.set_edge_attributes(G, 1, "capacity")
for flow_func in flow_funcs:
T = nx.gomory_hu_tree(G, flow_func=flow_func)
assert nx.is_tree(T)
for u, v in combinations(G, 2):
cut_value, edge = self.minimum_edge_weight(T, u, v)
assert nx.minimum_cut_value(G, u, v) == cut_value
def test_davis_southern_women_graph(self):
G = nx.davis_southern_women_graph()
nx.set_edge_attributes(G, 1, "capacity")
for flow_func in flow_funcs:
T = nx.gomory_hu_tree(G, flow_func=flow_func)
assert nx.is_tree(T)
for u, v in combinations(G, 2):
cut_value, edge = self.minimum_edge_weight(T, u, v)
assert nx.minimum_cut_value(G, u, v) == cut_value
def test_florentine_families_graph(self):
G = nx.florentine_families_graph()
nx.set_edge_attributes(G, 1, "capacity")
for flow_func in flow_funcs:
T = nx.gomory_hu_tree(G, flow_func=flow_func)
assert nx.is_tree(T)
for u, v in combinations(G, 2):
cut_value, edge = self.minimum_edge_weight(T, u, v)
assert nx.minimum_cut_value(G, u, v) == cut_value
@pytest.mark.slow
def test_les_miserables_graph_cutset(self):
G = nx.les_miserables_graph()
nx.set_edge_attributes(G, 1, "capacity")
for flow_func in flow_funcs:
T = nx.gomory_hu_tree(G, flow_func=flow_func)
assert nx.is_tree(T)
for u, v in combinations(G, 2):
cut_value, edge = self.minimum_edge_weight(T, u, v)
assert nx.minimum_cut_value(G, u, v) == cut_value
def test_karate_club_graph_cutset(self):
G = nx.karate_club_graph()
nx.set_edge_attributes(G, 1, "capacity")
T = nx.gomory_hu_tree(G)
assert nx.is_tree(T)
u, v = 0, 33
cut_value, edge = self.minimum_edge_weight(T, u, v)
cutset = self.compute_cutset(G, T, edge)
assert cut_value == len(cutset)
def test_wikipedia_example(self):
# Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree
G = nx.Graph()
G.add_weighted_edges_from(
(
(0, 1, 1),
(0, 2, 7),
(1, 2, 1),
(1, 3, 3),
(1, 4, 2),
(2, 4, 4),
(3, 4, 1),
(3, 5, 6),
(4, 5, 2),
)
)
for flow_func in flow_funcs:
T = nx.gomory_hu_tree(G, capacity="weight", flow_func=flow_func)
assert nx.is_tree(T)
for u, v in combinations(G, 2):
cut_value, edge = self.minimum_edge_weight(T, u, v)
assert nx.minimum_cut_value(G, u, v, capacity="weight") == cut_value
def test_directed_raises(self):
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.DiGraph()
T = nx.gomory_hu_tree(G)
def test_empty_raises(self):
with pytest.raises(nx.NetworkXError):
G = nx.empty_graph()
T = nx.gomory_hu_tree(G)
| TestGomoryHuTree |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup_py38.py | {
"start": 6304,
"end": 6915
} | class ____(typing.Protocol):
def bazzle(self, y):
pass
@given(st.data())
def test_can_resolve_registered_protocol(data):
with temp_registered(
FooProtocol,
st.builds(SimpleNamespace, frozzle=st.functions(like=lambda x: ...)),
):
obj = data.draw(st.from_type(FooProtocol))
assert obj.frozzle(x=1) is None
def test_cannot_resolve_un_registered_protocol():
msg = "Instance and class checks can only be used with @runtime_checkable protocols"
with pytest.raises(TypeError, match=msg):
check_can_generate_examples(st.from_type(BarProtocol))
| BarProtocol |
python | coleifer__peewee | tests/reflection.py | {
"start": 894,
"end": 1004
} | class ____(TestModel):
nullable_cf = CharField(null=True)
nullable_if = IntegerField(null=True)
| Nullable |
python | huggingface__transformers | src/transformers/models/qwen2_vl/processing_qwen2_vl.py | {
"start": 1601,
"end": 12187
} | class ____(ProcessorMixin):
r"""
Constructs a Qwen2-VL processor which wraps a Qwen2-VL image processor and a Qwen2 tokenizer into a single processor.
[`Qwen2VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
[`~Qwen2VLProcessor.__call__`] and [`~Qwen2VLProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Qwen2VLVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.video_token_id = (
tokenizer.video_token_id
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token)
)
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: Optional[VideoInput] = None,
**kwargs: Unpack[Qwen2VLProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Qwen2VLProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
image_inputs = videos_inputs = {}
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
video_grid_thw = videos_inputs["video_grid_thw"]
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if images is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if videos is not None:
merge_length = self.video_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
num_video_tokens = video_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.video_token, "<|placeholder|>" * num_video_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.video_token)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Qwen2VLProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
if video_sizes is not None:
videos_kwargs = Qwen2VLProcessorKwargs._defaults.get("videos_kwargs", {})
videos_kwargs.update(kwargs)
num_video_patches = [
self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
for video_size in video_sizes
]
num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
vision_data["num_video_tokens"] = num_video_tokens
return MultiModalData(**vision_data)
def post_process_image_text_to_text(
self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(
generated_outputs,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
__all__ = ["Qwen2VLProcessor"]
| Qwen2VLProcessor |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_between.py | {
"start": 780,
"end": 10689
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.between"
condition_value_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 # FIXME CoP
cls,
column,
min_value=None,
max_value=None,
strict_min=None,
strict_max=None,
**kwargs,
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
temp_column = column
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
# Use a vectorized approach for native numpy dtypes
if column.dtype in [int, float]:
return cls._pandas_vectorized(temp_column, min_value, max_value, strict_min, strict_max)
elif isinstance(column.dtype, pd.DatetimeTZDtype) or pd.api.types.is_datetime64_ns_dtype(
column.dtype
):
if min_value is not None and isinstance(min_value, str):
min_value = parse(min_value)
if max_value is not None and isinstance(max_value, str):
max_value = parse(max_value)
return cls._pandas_vectorized(temp_column, min_value, max_value, strict_min, strict_max)
def is_between(val): # noqa: C901, PLR0911, PLR0912 # FIXME CoP
# TODO Might be worth explicitly defining comparisons between types (for example, between strings and ints). # noqa: E501 # FIXME CoP
# Ensure types can be compared since some types in Python 3 cannot be logically compared. # noqa: E501 # FIXME CoP
# print type(val), type(min_value), type(max_value), val, min_value, max_value
if type(val) is None:
return False
if min_value is not None and max_value is not None:
# Type of column values is either string or specific rich type (or "None"). In all cases, type of # noqa: E501 # FIXME CoP
# column must match type of constant being compared to column value (otherwise, error is raised). # noqa: E501 # FIXME CoP
if (isinstance(val, str) != isinstance(min_value, str)) or (
isinstance(val, str) != isinstance(max_value, str)
):
raise TypeError( # noqa: TRY003 # FIXME CoP
"Column values, min_value, and max_value must either be None or of the same type." # noqa: E501 # FIXME CoP
)
if strict_min and strict_max:
return (val > min_value) and (val < max_value)
if strict_min:
return (val > min_value) and (val <= max_value)
if strict_max:
return (val >= min_value) and (val < max_value)
return (val >= min_value) and (val <= max_value)
elif min_value is None and max_value is not None:
# Type of column values is either string or specific rich type (or "None"). In all cases, type of # noqa: E501 # FIXME CoP
# column must match type of constant being compared to column value (otherwise, error is raised). # noqa: E501 # FIXME CoP
if isinstance(val, str) != isinstance(max_value, str):
raise TypeError( # noqa: TRY003 # FIXME CoP
"Column values, min_value, and max_value must either be None or of the same type." # noqa: E501 # FIXME CoP
)
if strict_max:
return val < max_value
return val <= max_value
elif min_value is not None and max_value is None:
# Type of column values is either string or specific rich type (or "None"). In all cases, type of # noqa: E501 # FIXME CoP
# column must match type of constant being compared to column value (otherwise, error is raised). # noqa: E501 # FIXME CoP
if isinstance(val, str) != isinstance(min_value, str):
raise TypeError( # noqa: TRY003 # FIXME CoP
"Column values, min_value, and max_value must either be None or of the same type." # noqa: E501 # FIXME CoP
)
if strict_min:
return val > min_value
return val >= min_value
else:
return False
return temp_column.map(is_between)
@classmethod
def _pandas_vectorized( # noqa: C901, PLR0911 # FIXME CoP
cls,
column: pd.Series,
min_value: Optional[Union[int, float, datetime.datetime]],
max_value: Optional[Union[int, float, datetime.datetime]],
strict_min: bool,
strict_max: bool,
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
if min_value is None:
if strict_max:
return column < max_value
else:
return column <= max_value
if max_value is None:
if strict_min:
return min_value < column
else:
return min_value <= column
if strict_min and strict_max:
return (min_value < column) & (column < max_value)
elif strict_min:
return (min_value < column) & (column <= max_value)
elif strict_max:
return (min_value <= column) & (column < max_value)
else:
return (min_value <= column) & (column <= max_value)
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy( # noqa: C901, PLR0911 # FIXME CoP
cls,
column,
min_value=None,
max_value=None,
strict_min=None,
strict_max=None,
**kwargs,
):
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
# Check that the generated SQL won't raise an error.
# ColumnValuesBetween metrics only work on numbers/dates,
# so we check for common string and boolean types.
# Retrieve column types from metrics.
metrics = kwargs.get("_metrics", {})
column_types = metrics.get("table.column_types", [])
# Map column names to their types as strings.
type_by_column = {ct.get("name"): str(ct.get("type", "")) for ct in column_types}
column_type = type_by_column.get(column.name)
INVALID_COLUMN_TYPES = (
"VARCHAR",
"CHAR",
"NVARCHAR",
"NCHAR",
"TEXT",
"STRING",
"BOOLEAN",
"BOOL",
"BIT",
"TINYTEXT",
"MEDIUMTEXT",
"LONGTEXT",
)
if column_type and column_type.upper().startswith(INVALID_COLUMN_TYPES):
raise InvalidColumnTypeError(column_type=column_type)
if min_value is None:
if strict_max:
return column < sa.literal(max_value)
return column <= sa.literal(max_value)
elif max_value is None:
if strict_min:
return column > sa.literal(min_value)
return column >= sa.literal(min_value)
else:
if strict_min and strict_max:
return sa.and_(
column > sa.literal(min_value),
column < sa.literal(max_value),
)
if strict_min:
return sa.and_(
column > sa.literal(min_value),
column <= sa.literal(max_value),
)
if strict_max:
return sa.and_(
column >= sa.literal(min_value),
column < sa.literal(max_value),
)
return sa.and_(
column >= sa.literal(min_value),
column <= sa.literal(max_value),
)
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark( # noqa: C901, PLR0911 # FIXME CoP
cls,
column,
min_value=None,
max_value=None,
strict_min=None,
strict_max=None,
**kwargs,
):
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003 # FIXME CoP
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003 # FIXME CoP
if min_value is None:
if strict_max:
return column < F.lit(max_value)
return column <= F.lit(max_value)
elif max_value is None:
if strict_min:
return column > F.lit(min_value)
return column >= F.lit(min_value)
else:
if strict_min and strict_max:
return (column > F.lit(min_value)) & (column < F.lit(max_value))
if strict_min:
return (column > F.lit(min_value)) & (column <= F.lit(max_value))
if strict_max:
return (column >= F.lit(min_value)) & (column < F.lit(max_value))
return (column >= F.lit(min_value)) & (column <= F.lit(max_value))
| ColumnValuesBetween |
python | bottlepy__bottle | bottle.py | {
"start": 7914,
"end": 7967
} | class ____(RouteError):
pass
| RouterUnknownModeError |
python | Netflix__metaflow | metaflow/_vendor/click/core.py | {
"start": 41282,
"end": 51577
} | class ____(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is enabled by default if
`invoke_without_command` is disabled or disabled
if it's enabled. If enabled this will add
``--help`` as argument if no arguments are
passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: the result callback to attach to this multi
command.
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(
self,
name=None,
invoke_without_command=False,
no_args_is_help=None,
subcommand_metavar=None,
chain=False,
result_callback=None,
**attrs
):
Command.__init__(self, name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = SUBCOMMANDS_METAVAR
else:
subcommand_metavar = SUBCOMMAND_METAVAR
self.subcommand_metavar = subcommand_metavar
self.chain = chain
#: The result callback that is stored. This can be set or
#: overridden with the :func:`resultcallback` decorator.
self.result_callback = result_callback
if self.chain:
for param in self.params:
if isinstance(param, Argument) and not param.required:
raise RuntimeError(
"Multi commands in chain mode cannot have"
" optional arguments."
)
def collect_usage_pieces(self, ctx):
rv = Command.collect_usage_pieces(self, ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx, formatter):
Command.format_options(self, ctx, formatter)
self.format_commands(ctx, formatter)
def resultcallback(self, replace=False):
"""Adds a result callback to the chain command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.resultcallback()
def process_result(result, input):
return result + input
.. versionadded:: 3.0
:param replace: if set to `True` an already existing result
callback will be removed.
"""
def decorator(f):
old_callback = self.result_callback
if old_callback is None or replace:
self.result_callback = f
return f
def function(__value, *args, **kwargs):
return f(old_callback(__value, *args, **kwargs), *args, **kwargs)
self.result_callback = rv = update_wrapper(function, f)
return rv
return decorator
def format_commands(self, ctx, formatter):
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section("Commands"):
formatter.write_dl(rows)
def parse_args(self, ctx, args):
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
rest = Command.parse_args(self, ctx, args)
if self.chain:
ctx.protected_args = rest
ctx.args = []
elif rest:
ctx.protected_args, ctx.args = rest[:1], rest[1:]
return ctx.args
def invoke(self, ctx):
def _process_result(value):
if self.result_callback is not None:
value = ctx.invoke(self.result_callback, value, **ctx.params)
return value
if not ctx.protected_args:
# If we are invoked without command the chain flag controls
# how this happens. If we are not in chain mode, the return
# value here is the return value of the command.
# If however we are in chain mode, the return value is the
# return value of the result processor invoked with an empty
# list (which means that no subcommand actually was executed).
if self.invoke_without_command:
if not self.chain:
return Command.invoke(self, ctx)
with ctx:
Command.invoke(self, ctx)
return _process_result([])
ctx.fail("Missing command.")
# Fetch args back out
args = ctx.protected_args + ctx.args
ctx.args = []
ctx.protected_args = []
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
ctx.invoked_subcommand = cmd_name
Command.invoke(self, ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = "*" if args else None
Command.invoke(self, ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
)
contexts.append(sub_ctx)
args, sub_ctx.args = sub_ctx.args, []
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(self, ctx, args):
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None and not ctx.resilient_parsing:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail("No such command '{}'.".format(original_cmd_name))
return cmd_name, cmd, args[1:]
def get_command(self, ctx, cmd_name):
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError()
def list_commands(self, ctx):
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
| MultiCommand |
python | getsentry__sentry | src/sentry/interfaces/spans.py | {
"start": 269,
"end": 1195
} | class ____(Interface):
"""
Holds timing spans related to APM and tracing.
>>> {
>>> 'trace_id': 'a0fa8803753e40fd8124b21eeb2986b5',
>>> 'parent_span_id': '9c2a6db8c79068a2',
>>> 'span_id': '8c931f4740435fb8',
>>> 'start_timestamp': '2019-06-14T14:01:41Z',
>>> 'same_process_as_parent': true,
>>> 'description': 'http://httpbin.org/base64/aGVsbG8gd29ybGQK GET',
>>> 'tags': { 'http.status_code': 200, 'error': false },
>>> 'timestamp': '2019-06-14T14:01:41Z',
>>> 'op': 'http',
>>> 'data': {
>>> 'url': 'http://httpbin.org/base64/aGVsbG8gd29ybGQK',
>>> 'status_code': 200,
>>> 'reason': 'OK',
>>> 'method': 'GET'
>>> }
>>> }
"""
@classmethod
def to_python(cls, data, **kwargs):
for key in SPAN_KEYS:
data.setdefault(key, None)
return super().to_python(data, **kwargs)
| Span |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 28893,
"end": 29354
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("fr_FR")
Faker.seed(0)
def test_vat_id(self):
for _ in range(100):
assert re.search(r"^FR[\w\d]{2} \d{9}$", self.fake.vat_id())
def test_ssn(self) -> None:
for _ in range(100):
assert re.search(r"^\d{15}$", self.fake.ssn())
def test_checksum(self) -> None:
assert fr_calculate_checksum(2570533063999) == 3
| TestFrFR |
python | hyperopt__hyperopt | hyperopt/pyll/base.py | {
"start": 17699,
"end": 19279
} | class ____(Apply):
def __init__(self, obj=None):
try:
o_len = len(obj)
except (AttributeError, TypeError):
# Note: AttributeError is raised on sklearn's
# RandomForestClassifier when used before fit
o_len = None
Apply.__init__(self, "literal", [], {}, o_len, pure=True)
self._obj = obj
def eval(self, memo=None):
if memo is None:
memo = {}
return memo.setdefault(id(self), self._obj)
@property
def obj(self):
return self._obj
@property
def arg(self):
return {}
def pprint(self, ofile, lineno=None, indent=0, memo=None):
if lineno is None:
lineno = [0]
if memo is None:
memo = {}
if self in memo:
print(lineno[0], " " * indent + memo[self], file=ofile)
else:
# TODO: set up a registry for this
if isinstance(self._obj, np.ndarray):
msg = "Literal{{np.ndarray,shape={},min={:f},max={:f}}}".format(
self._obj.shape,
self._obj.min(),
self._obj.max(),
)
else:
msg = "Literal{%s}" % str(self._obj)
memo[self] = "%s [line:%i]" % (msg, lineno[0])
print(lineno[0], " " * indent + msg, file=ofile)
lineno[0] += 1
def replace_input(self, old_node, new_node):
return []
def clone_from_inputs(self, inputs, o_len="same"):
return self.__class__(self._obj)
| Literal |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/extendee/package.py | {
"start": 217,
"end": 522
} | class ____(Package):
"""A package with extensions"""
homepage = "http://www.example.com"
url = "http://www.example.com/extendee-1.0.tar.gz"
extendable = True
version("1.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
mkdirp(prefix.bin)
| Extendee |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 4409,
"end": 4602
} | class ____:
params = [50, 1000, 10**5]
param_names = ["n"]
def setup(self, n):
self.s = Series(np.random.randn(n))
def time_clip(self, n):
self.s.clip(0, 1)
| Clip |
python | ApeWorX__ape | src/ape/api/query.py | {
"start": 3468,
"end": 3588
} | class ____(BaseModel):
columns: Sequence[str]
# TODO: Support "*" from getting the EcosystemAPI fields
| _BaseQuery |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 64677,
"end": 66865
} | class ____(TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = OrderedSet(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = OrderedSet(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original OrderedSet
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
cuboctahedron = linegraph(g) # V( --> {V1, V2, V3, V4}
self.assertEqual(len(cuboctahedron), 12) # twelve vertices
vertices = OrderedSet(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(
len(edges), 4
) # each vertex connects to four other vertices
othervertices = OrderedSet(
edge for edges in cuboctahedron.values() for edge in edges
)
self.assertEqual(
vertices, othervertices
) # edge vertices in original OrderedSet
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
# ==============================================================================
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
run_tests()
| TestGraphs |
python | doocs__leetcode | solution/1300-1399/1311.Get Watched Videos by Your Friends/Solution.py | {
"start": 0,
"end": 652
} | class ____:
def watchedVideosByFriends(
self,
watchedVideos: List[List[str]],
friends: List[List[int]],
id: int,
level: int,
) -> List[str]:
q = deque([id])
vis = {id}
for _ in range(level):
for _ in range(len(q)):
i = q.popleft()
for j in friends[i]:
if j not in vis:
vis.add(j)
q.append(j)
cnt = Counter()
for i in q:
for v in watchedVideos[i]:
cnt[v] += 1
return sorted(cnt.keys(), key=lambda k: (cnt[k], k))
| Solution |
python | doocs__leetcode | solution/1400-1499/1428.Leftmost Column with at Least a One/Solution.py | {
"start": 237,
"end": 559
} | class ____:
def leftMostColumnWithOne(self, binaryMatrix: "BinaryMatrix") -> int:
m, n = binaryMatrix.dimensions()
ans = n
for i in range(m):
j = bisect_left(range(n), 1, key=lambda k: binaryMatrix.get(i, k))
ans = min(ans, j)
return -1 if ans >= n else ans
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_us_state_abbreviation.py | {
"start": 775,
"end": 1832
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_state_abbreviation"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, dc_statehood=True, **kwargs):
return column.apply(lambda x: is_valid_state_abbreviation(x, dc_statehood))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidUSStateAbbreviation |
python | davidhalter__jedi | jedi/inference/value/klass.py | {
"start": 20483,
"end": 21880
} | class ____(ValueWrapper, ClassMixin):
"""
A class decorated with the ``dataclass_transform`` decorator. dataclass-like
semantics will be assumed for any class that directly or indirectly derives
from the decorated class or uses the decorated class as a metaclass.
Attributes on the decorated class and its base classes are not considered to
be fields.
"""
def __init__(self, wrapped_value):
super().__init__(wrapped_value)
def init_mode_from_new(self) -> bool:
"""Default value if missing is ``True``"""
new_methods = self._wrapped_value.py__getattribute__("__new__")
if not new_methods:
return True
new_method = list(new_methods)[0]
for param in new_method.get_param_names():
if (
param.string_name == "init"
and param.default_node
and param.default_node.type == "keyword"
):
if param.default_node.value == "False":
return False
elif param.default_node.value == "True":
return True
return True
@property
def init_mode_from_init_subclass(self) -> Optional[bool]:
# def __init_subclass__(cls) -> None: ... is hardcoded in the typeshed
# so the extra parameters can not be inferred.
return True
| DataclassTransformer |
python | kamyu104__LeetCode-Solutions | Python/minimum-insertion-steps-to-make-a-string-palindrome.py | {
"start": 31,
"end": 741
} | class ____(object):
def minInsertions(self, s):
"""
:type s: str
:rtype: int
"""
def longestCommonSubsequence(text1, text2):
if len(text1) < len(text2):
return self.longestCommonSubsequence(text2, text1)
dp = [[0 for _ in xrange(len(text2)+1)] for _ in xrange(2)]
for i in xrange(1, len(text1)+1):
for j in xrange(1, len(text2)+1):
dp[i%2][j] = dp[(i-1)%2][j-1]+1 if text1[i-1] == text2[j-1] \
else max(dp[(i-1)%2][j], dp[i%2][j-1])
return dp[len(text1)%2][len(text2)]
return len(s)-longestCommonSubsequence(s, s[::-1])
| Solution |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 29240,
"end": 30248
} | class ____(NextRedirectMixin, LogoutFunctionalityMixin, TemplateView):
template_name = "account/logout." + app_settings.TEMPLATE_EXTENSION
def get(self, *args, **kwargs):
if app_settings.LOGOUT_ON_GET:
return self.post(*args, **kwargs)
if not self.request.user.is_authenticated:
response = redirect(self.get_redirect_url())
return _ajax_response(self.request, response)
ctx = self.get_context_data()
response = self.render_to_response(ctx)
return _ajax_response(self.request, response)
def post(self, *args, **kwargs):
url = self.get_redirect_url()
self.logout()
response = redirect(url)
return _ajax_response(self.request, response)
def get_redirect_url(self):
return self.get_next_url() or get_adapter(self.request).get_logout_redirect_url(
self.request
)
logout = LogoutView.as_view()
@method_decorator(login_not_required, name="dispatch")
| LogoutView |
python | paramiko__paramiko | paramiko/buffered_pipe.py | {
"start": 1117,
"end": 1248
} | class ____(IOError):
"""
Indicates that a timeout was reached on a read from a `.BufferedPipe`.
"""
pass
| PipeTimeout |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 35485,
"end": 38356
} | class ____(FigureManagerBase):
"""
Container/controller for the FigureCanvas and GUI frame.
It is instantiated by Gcf whenever a new figure is created. Gcf is
responsible for managing multiple instances of FigureManagerWx.
Attributes
----------
canvas : `FigureCanvas`
a FigureCanvasWx(wx.Panel) instance
window : wxFrame
a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
"""
def __init__(self, canvas, num, frame):
_log.debug("%s - __init__()", type(self))
self.frame = self.window = frame
super().__init__(canvas, num)
@classmethod
def create_with_canvas(cls, canvas_class, figure, num):
# docstring inherited
wxapp = wx.GetApp() or _create_wxapp()
frame = FigureFrameWx(num, figure, canvas_class=canvas_class)
manager = figure.canvas.manager
if mpl.is_interactive():
manager.frame.Show()
figure.canvas.draw_idle()
return manager
@classmethod
def start_main_loop(cls):
if not wx.App.IsMainLoopRunning():
wxapp = wx.GetApp()
if wxapp is not None:
wxapp.MainLoop()
def show(self):
# docstring inherited
self.frame.Show()
self.canvas.draw()
if mpl.rcParams['figure.raise_window']:
self.frame.Raise()
def destroy(self, *args):
# docstring inherited
_log.debug("%s - destroy()", type(self))
frame = self.frame
if frame: # Else, may have been already deleted, e.g. when closing.
# As this can be called from non-GUI thread from plt.close use
# wx.CallAfter to ensure thread safety.
wx.CallAfter(frame.Close)
super().destroy()
def full_screen_toggle(self):
# docstring inherited
self.frame.ShowFullScreen(not self.frame.IsFullScreen())
def get_window_title(self):
# docstring inherited
return self.window.GetTitle()
def set_window_title(self, title):
# docstring inherited
self.window.SetTitle(title)
def resize(self, width, height):
# docstring inherited
# Directly using SetClientSize doesn't handle the toolbar on Windows.
self.window.SetSize(self.window.ClientToWindowSize(wx.Size(
math.ceil(width), math.ceil(height))))
def _load_bitmap(filename):
"""
Load a wx.Bitmap from a file in the "images" directory of the Matplotlib
data.
"""
return wx.Bitmap(str(cbook._get_data_path('images', filename)))
def _set_frame_icon(frame):
bundle = wx.IconBundle()
for image in ('matplotlib.png', 'matplotlib_large.png'):
icon = wx.Icon(_load_bitmap(image))
if not icon.IsOk():
return
bundle.AddIcon(icon)
frame.SetIcons(bundle)
| FigureManagerWx |
python | pypa__pip | tests/unit/test_req_file.py | {
"start": 24006,
"end": 24531
} | class ____:
def test_no_args(self) -> None:
assert ("", "--option") == break_args_options("--option")
def test_no_options(self) -> None:
assert ("arg arg", "") == break_args_options("arg arg")
def test_args_short_options(self) -> None:
result = break_args_options("arg arg -s")
assert ("arg arg", "-s") == result
def test_args_long_options(self) -> None:
result = break_args_options("arg arg --long")
assert ("arg arg", "--long") == result
| TestBreakOptionsArgs |
python | simonw__datasette | datasette/utils/asgi.py | {
"start": 1301,
"end": 4327
} | class ____:
def __init__(self, scope, receive):
self.scope = scope
self.receive = receive
def __repr__(self):
return '<asgi.Request method="{}" url="{}">'.format(self.method, self.url)
@property
def method(self):
return self.scope["method"]
@property
def url(self):
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
@property
def url_vars(self):
return (self.scope.get("url_route") or {}).get("kwargs") or {}
@property
def scheme(self):
return self.scope.get("scheme") or "http"
@property
def headers(self):
return {
k.decode("latin-1").lower(): v.decode("latin-1")
for k, v in self.scope.get("headers") or []
}
@property
def host(self):
return self.headers.get("host") or "localhost"
@property
def cookies(self):
cookies = SimpleCookie()
cookies.load(self.headers.get("cookie", ""))
return {key: value.value for key, value in cookies.items()}
@property
def path(self):
if self.scope.get("raw_path") is not None:
return self.scope["raw_path"].decode("latin-1").partition("?")[0]
else:
path = self.scope["path"]
if isinstance(path, str):
return path
else:
return path.decode("utf-8")
@property
def query_string(self):
return (self.scope.get("query_string") or b"").decode("latin-1")
@property
def full_path(self):
qs = self.query_string
return "{}{}".format(self.path, ("?" + qs) if qs else "")
@property
def args(self):
return MultiParams(parse_qs(qs=self.query_string, keep_blank_values=True))
@property
def actor(self):
return self.scope.get("actor", None)
async def post_body(self):
body = b""
more_body = True
while more_body:
message = await self.receive()
assert message["type"] == "http.request", message
body += message.get("body", b"")
more_body = message.get("more_body", False)
return body
async def post_vars(self):
body = await self.post_body()
return dict(parse_qsl(body.decode("utf-8"), keep_blank_values=True))
@classmethod
def fake(cls, path_with_query_string, method="GET", scheme="http", url_vars=None):
"""Useful for constructing Request objects for tests"""
path, _, query_string = path_with_query_string.partition("?")
scope = {
"http_version": "1.1",
"method": method,
"path": path,
"raw_path": path_with_query_string.encode("latin-1"),
"query_string": query_string.encode("latin-1"),
"scheme": scheme,
"type": "http",
}
if url_vars:
scope["url_route"] = {"kwargs": url_vars}
return cls(scope, None)
| Request |
python | graphql-python__graphene | graphene/utils/props.py | {
"start": 28,
"end": 229
} | class ____:
pass
_all_vars = set(dir(_OldClass) + dir(_NewClass))
def props(x):
return {
key: vars(x).get(key, getattr(x, key)) for key in dir(x) if key not in _all_vars
}
| _NewClass |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC202_numpy.py | {
"start": 805,
"end": 998
} | class ____(metaclass=abc.abcmeta):
@abc.abstractmethod
def f(self):
"""Lorem ipsum
Returns
-------
dict:
The values
"""
return
| A |
python | django__django | django/db/migrations/operations/base.py | {
"start": 91,
"end": 239
} | class ____(str, enum.Enum):
ADDITION = "+"
REMOVAL = "-"
ALTERATION = "~"
PYTHON = "p"
SQL = "s"
MIXED = "?"
| OperationCategory |
python | pyqtgraph__pyqtgraph | pyqtgraph/Qt/internals.py | {
"start": 342,
"end": 388
} | class ____(ctypes.Structure):
pass
| QArrayData |
python | doocs__leetcode | solution/0100-0199/0161.One Edit Distance/Solution.py | {
"start": 0,
"end": 395
} | class ____:
def isOneEditDistance(self, s: str, t: str) -> bool:
if len(s) < len(t):
return self.isOneEditDistance(t, s)
m, n = len(s), len(t)
if m - n > 1:
return False
for i, c in enumerate(t):
if c != s[i]:
return s[i + 1 :] == t[i + 1 :] if m == n else s[i + 1 :] == t[i:]
return m == n + 1
| Solution |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 24941,
"end": 26751
} | class ____:
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
rng = np.random.default_rng(1234)
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = ifftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
x = rng.random((20, 26))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
x = rng.random((5, 4, 3, 20))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
@pytest.mark.parametrize('maxnlp', [2000, 3500])
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random_complex(self, maxnlp, size):
rng = np.random.default_rng(1234)
x = rng.random([size, size]) + 1j * rng.random([size, size])
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
ifftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
ifftn([[1, 1], [2, 2]], (4, -3))
def test_no_axes(self):
x = numpy.random.random((2,2,2))
assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
| TestIfftn |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 20186,
"end": 21435
} | class ____(RenderedComponentContent):
def __init__(
self,
value_list,
header=None,
subheader=None,
styling=None,
content_block_type="value_list",
) -> None:
super().__init__(content_block_type=content_block_type, styling=styling)
self.header = header
self.subheader = subheader
self.value_list = value_list
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this ValueListContent.
Returns:
A JSON-serializable dict representation of this ValueListContent.
"""
d = super().to_json_dict()
if self.header is not None:
if isinstance(self.header, RenderedContent):
d["header"] = self.header.to_json_dict()
else:
d["header"] = self.header
if self.subheader is not None:
if isinstance(self.subheader, RenderedContent):
d["subheader"] = self.subheader.to_json_dict()
else:
d["subheader"] = self.subheader
d["value_list"] = RenderedContent.rendered_content_list_to_json(self.value_list)
return d
| ValueListContent |
python | pytorch__pytorch | torch/utils/data/datapipes/datapipe.py | {
"start": 15435,
"end": 16416
} | class ____:
def __init__(self, datapipe) -> None:
self._datapipe = datapipe
def __getstate__(self):
use_dill = False
try:
value = pickle.dumps(self._datapipe)
except Exception:
if HAS_DILL:
# pyrefly: ignore [missing-attribute]
value = dill.dumps(self._datapipe)
use_dill = True
else:
raise
return (value, use_dill)
def __setstate__(self, state):
value, use_dill = state
if use_dill:
# pyrefly: ignore [missing-attribute]
self._datapipe = dill.loads(value)
else:
self._datapipe = pickle.loads(value)
def __len__(self) -> int:
try:
return len(self._datapipe)
except Exception as e:
raise TypeError(
f"{type(self).__name__} instance doesn't have valid length"
) from e
| _DataPipeSerializationWrapper |
python | kubernetes-client__python | kubernetes/client/models/v1_fc_volume_source.py | {
"start": 383,
"end": 7349
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'lun': 'int',
'read_only': 'bool',
'target_ww_ns': 'list[str]',
'wwids': 'list[str]'
}
attribute_map = {
'fs_type': 'fsType',
'lun': 'lun',
'read_only': 'readOnly',
'target_ww_ns': 'targetWWNs',
'wwids': 'wwids'
}
def __init__(self, fs_type=None, lun=None, read_only=None, target_ww_ns=None, wwids=None, local_vars_configuration=None): # noqa: E501
"""V1FCVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._lun = None
self._read_only = None
self._target_ww_ns = None
self._wwids = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if lun is not None:
self.lun = lun
if read_only is not None:
self.read_only = read_only
if target_ww_ns is not None:
self.target_ww_ns = target_ww_ns
if wwids is not None:
self.wwids = wwids
@property
def fs_type(self):
"""Gets the fs_type of this V1FCVolumeSource. # noqa: E501
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:return: The fs_type of this V1FCVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1FCVolumeSource.
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:param fs_type: The fs_type of this V1FCVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def lun(self):
"""Gets the lun of this V1FCVolumeSource. # noqa: E501
lun is Optional: FC target lun number # noqa: E501
:return: The lun of this V1FCVolumeSource. # noqa: E501
:rtype: int
"""
return self._lun
@lun.setter
def lun(self, lun):
"""Sets the lun of this V1FCVolumeSource.
lun is Optional: FC target lun number # noqa: E501
:param lun: The lun of this V1FCVolumeSource. # noqa: E501
:type: int
"""
self._lun = lun
@property
def read_only(self):
"""Gets the read_only of this V1FCVolumeSource. # noqa: E501
readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:return: The read_only of this V1FCVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1FCVolumeSource.
readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:param read_only: The read_only of this V1FCVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def target_ww_ns(self):
"""Gets the target_ww_ns of this V1FCVolumeSource. # noqa: E501
targetWWNs is Optional: FC target worldwide names (WWNs) # noqa: E501
:return: The target_ww_ns of this V1FCVolumeSource. # noqa: E501
:rtype: list[str]
"""
return self._target_ww_ns
@target_ww_ns.setter
def target_ww_ns(self, target_ww_ns):
"""Sets the target_ww_ns of this V1FCVolumeSource.
targetWWNs is Optional: FC target worldwide names (WWNs) # noqa: E501
:param target_ww_ns: The target_ww_ns of this V1FCVolumeSource. # noqa: E501
:type: list[str]
"""
self._target_ww_ns = target_ww_ns
@property
def wwids(self):
"""Gets the wwids of this V1FCVolumeSource. # noqa: E501
wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. # noqa: E501
:return: The wwids of this V1FCVolumeSource. # noqa: E501
:rtype: list[str]
"""
return self._wwids
@wwids.setter
def wwids(self, wwids):
"""Sets the wwids of this V1FCVolumeSource.
wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. # noqa: E501
:param wwids: The wwids of this V1FCVolumeSource. # noqa: E501
:type: list[str]
"""
self._wwids = wwids
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1FCVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1FCVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1FCVolumeSource |
python | kamyu104__LeetCode-Solutions | Python/find-all-groups-of-farmland.py | {
"start": 33,
"end": 758
} | class ____(object):
def findFarmland(self, land):
"""
:type land: List[List[int]]
:rtype: List[List[int]]
"""
result = []
for i in xrange(len(land)):
for j in xrange(len(land[0])):
if land[i][j] != 1:
continue
ni, nj = i, j
while ni+1 < len(land) and land[ni+1][j] == 1:
ni += 1
while nj+1 < len(land[0]) and land[i][nj+1] == 1:
nj += 1
for r in xrange(i, ni+1):
for c in xrange(j, nj+1):
land[r][c] = -1
result.append([i, j, ni, nj])
return result
| Solution |
python | getsentry__sentry | tests/sentry/web/frontend/test_project_event.py | {
"start": 2320,
"end": 3526
} | class ____(SnubaTestCase, TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.login_as(self.user)
self.org = self.create_organization()
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.project = self.create_project(organization=self.org, teams=[self.team])
min_ago = before_now(minutes=1).isoformat()
self.event = self.store_event(
data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=self.project.id
)
@with_feature("system:multi-region")
def test_redirect_to_event_customer_domain(self) -> None:
self.org.refresh_from_db()
resp = self.client.get(
reverse(
"sentry-project-event-redirect",
args=[self.org.slug, self.project.slug, self.event.event_id],
)
)
assert (
resp["Location"]
== f"http://{self.org.slug}.testserver/issues/{self.event.group_id}/events/{self.event.event_id}/"
)
| ProjectEventCustomerDomainTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/llms/types.py | {
"start": 4595,
"end": 7435
} | class ____(BaseModel):
"""A representation of audio data to directly pass to/from the LLM."""
block_type: Literal["audio"] = "audio"
audio: bytes | IOBase | None = None
path: FilePath | None = None
url: AnyUrl | str | None = None
format: str | None = None
model_config = ConfigDict(arbitrary_types_allowed=True)
@field_validator("url", mode="after")
@classmethod
def urlstr_to_anyurl(cls, url: str | AnyUrl) -> AnyUrl:
"""Store the url as Anyurl."""
if isinstance(url, AnyUrl):
return url
return AnyUrl(url=url)
@field_serializer("audio")
def serialize_audio(self, audio: bytes | IOBase | None) -> bytes | None:
"""Serialize the audio field."""
if isinstance(audio, bytes):
return audio
if isinstance(audio, IOBase):
audio.seek(0)
return audio.read()
return None
@model_validator(mode="after")
def audio_to_base64(self) -> Self:
"""
Store the audio as base64 and guess the mimetype when possible.
In case the model was built passing audio data but without a mimetype,
we try to guess it using the filetype library. To avoid resource-intense
operations, we won't load the path or the URL to guess the mimetype.
"""
if not self.audio or not isinstance(self.audio, bytes):
return self
try:
# Check if audio is already base64 encoded
decoded_audio = base64.b64decode(self.audio)
except Exception:
decoded_audio = self.audio
# Not base64 - encode it
self.audio = base64.b64encode(self.audio)
self._guess_format(decoded_audio)
return self
def _guess_format(self, audio_data: bytes) -> None:
if not self.format:
guess = filetype.guess(audio_data)
self.format = guess.extension if guess else None
def resolve_audio(self, as_base64: bool = False) -> IOBase:
"""
Resolve an audio such that PIL can read it.
Args:
as_base64 (bool): whether the resolved audio should be returned as base64-encoded bytes
"""
data_buffer = (
self.audio
if isinstance(self.audio, IOBase)
else resolve_binary(
raw_bytes=self.audio,
path=self.path,
url=str(self.url) if self.url else None,
as_base64=as_base64,
)
)
# Check size by seeking to end and getting position
data_buffer.seek(0, 2) # Seek to end
size = data_buffer.tell()
data_buffer.seek(0) # Reset to beginning
if size == 0:
raise ValueError("resolve_audio returned zero bytes")
return data_buffer
| AudioBlock |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 29188,
"end": 30114
} | class ____(BaseGroup):
"""
Feature: Create and manage soft links with the high-level interface
"""
def test_spath(self):
""" SoftLink path attribute """
sl = SoftLink('/foo')
self.assertEqual(sl.path, '/foo')
def test_srepr(self):
""" SoftLink path repr """
sl = SoftLink('/foo')
self.assertIsInstance(repr(sl), str)
def test_create(self):
""" Create new soft link by assignment """
new = make_name("new")
alias = make_name("alias")
g = self.f.create_group(new)
sl = SoftLink(f"/{new}")
self.f[alias] = sl
g2 = self.f[alias]
self.assertEqual(g, g2)
def test_exc(self):
""" Opening dangling soft link results in KeyError """
name = make_name()
self.f[name] = SoftLink('new')
with self.assertRaises(KeyError):
self.f[name]
| TestSoftLinks |
python | streamlit__streamlit | lib/streamlit/elements/snow.py | {
"start": 884,
"end": 1439
} | class ____:
@gather_metrics("snow")
def snow(self) -> DeltaGenerator:
"""Draw celebratory snowfall.
Example
-------
>>> import streamlit as st
>>>
>>> st.snow()
...then watch your app and get ready for a cool celebration!
"""
snow_proto = SnowProto()
snow_proto.show = True
return self.dg._enqueue("snow", snow_proto)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| SnowMixin |
python | pytorch__pytorch | test/dynamo/test_sets.py | {
"start": 21632,
"end": 21882
} | class ____(_SetBase, _BaseSetTests):
class CustomSet(set):
pass
thetype = CustomSet
def test_in_frozenset(self):
super().test_in_frozenset()
def test_equality(self):
super().test_equality()
| UserDefinedSetTests |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 113100,
"end": 113251
} | class ____(MaybeAlignPartitions):
_parameters = ["frame", "other", "func", "fill_value", "overwrite"]
_expr_cls = CombineSeries
| CombineFrameAlign |
python | run-llama__llama_index | llama-index-core/tests/program/test_multi_modal_llm_program.py | {
"start": 906,
"end": 1596
} | class ____(BaseModel):
__test__ = False
hello: str
def test_multi_modal_llm_program(image_url: str) -> None:
"""Test Multi Modal LLM Pydantic program."""
output_parser = PydanticOutputParser(output_cls=TestModel)
multi_modal_llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=output_parser,
prompt_template_str="This is a test prompt with a {test_input}.",
multi_modal_llm=MagicLLM(),
image_documents=[ImageBlock(url=image_url)],
)
# mock Multi Modal llm
obj_output = multi_modal_llm_program(test_input="hello")
assert isinstance(obj_output, TestModel)
assert obj_output.hello == "world"
| TestModel |
python | sympy__sympy | sympy/solvers/simplex.py | {
"start": 2908,
"end": 33829
} | class ____(Exception):
"""
A linear programming problem is considered infeasible if its
constraint set is empty. That is, if the set of all vectors
satisfying the constraints is empty, then the problem is infeasible.
Example
=======
Suppose you want to maximize
x
subject to
x >= 10
x <= 9
No x can satisfy those constraints.
"""
pass
def _pivot(M, i, j):
"""
The pivot element `M[i, j]` is inverted and the rest of the matrix
modified and returned as a new matrix; original is left unmodified.
Example
=======
>>> from sympy.matrices.dense import Matrix
>>> from sympy.solvers.simplex import _pivot
>>> from sympy import var
>>> Matrix(3, 3, var('a:i'))
Matrix([
[a, b, c],
[d, e, f],
[g, h, i]])
>>> _pivot(_, 1, 0)
Matrix([
[-a/d, -a*e/d + b, -a*f/d + c],
[ 1/d, e/d, f/d],
[-g/d, h - e*g/d, i - f*g/d]])
"""
Mi, Mj, Mij = M[i, :], M[:, j], M[i, j]
if Mij == 0:
raise ZeroDivisionError(
"Tried to pivot about zero-valued entry.")
A = M - Mj * (Mi / Mij)
A[i, :] = Mi / Mij
A[:, j] = -Mj / Mij
A[i, j] = 1 / Mij
return A
def _choose_pivot_row(A, B, candidate_rows, pivot_col, Y):
# Choose row with smallest ratio
# If there are ties, pick using Bland's rule
return min(candidate_rows, key=lambda i: (B[i] / A[i, pivot_col], Y[i]))
def _simplex(A, B, C, D=None, dual=False):
"""Return ``(o, x, y)`` obtained from the two-phase simplex method
using Bland's rule: ``o`` is the minimum value of primal,
``Cx - D``, under constraints ``Ax <= B`` (with ``x >= 0``) and
the maximum of the dual, ``y^{T}B - D``, under constraints
``A^{T}*y >= C^{T}`` (with ``y >= 0``). To compute the dual of
the system, pass `dual=True` and ``(o, y, x)`` will be returned.
Note: the nonnegative constraints for ``x`` and ``y`` supercede
any values of ``A`` and ``B`` that are inconsistent with that
assumption, so if a constraint of ``x >= -1`` is represented
in ``A`` and ``B``, no value will be obtained that is negative; if
a constraint of ``x <= -1`` is represented, an error will be
raised since no solution is possible.
This routine relies on the ability of determining whether an
expression is 0 or not. This is guaranteed if the input contains
only Float or Rational entries. It will raise a TypeError if
a relationship does not evaluate to True or False.
Examples
========
>>> from sympy.solvers.simplex import _simplex
>>> from sympy import Matrix
Consider the simple minimization of ``f = x + y + 1`` under the
constraint that ``y + 2*x >= 4``. This is the "standard form" of
a minimization.
In the nonnegative quadrant, this inequality describes a area above
a triangle with vertices at (0, 4), (0, 0) and (2, 0). The minimum
of ``f`` occurs at (2, 0). Define A, B, C, D for the standard
minimization:
>>> A = Matrix([[2, 1]])
>>> B = Matrix([4])
>>> C = Matrix([[1, 1]])
>>> D = Matrix([-1])
Confirm that this is the system of interest:
>>> from sympy.abc import x, y
>>> X = Matrix([x, y])
>>> (C*X - D)[0]
x + y + 1
>>> [i >= j for i, j in zip(A*X, B)]
[2*x + y >= 4]
Since `_simplex` will do a minimization for constraints given as
``A*x <= B``, the signs of ``A`` and ``B`` must be negated since
the currently correspond to a greater-than inequality:
>>> _simplex(-A, -B, C, D)
(3, [2, 0], [1/2])
The dual of minimizing ``f`` is maximizing ``F = c*y - d`` for
``a*y <= b`` where ``a``, ``b``, ``c``, ``d`` are derived from the
transpose of the matrix representation of the standard minimization:
>>> tr = lambda a, b, c, d: [i.T for i in (a, c, b, d)]
>>> a, b, c, d = tr(A, B, C, D)
This time ``a*x <= b`` is the expected inequality for the `_simplex`
method, but to maximize ``F``, the sign of ``c`` and ``d`` must be
changed (so that minimizing the negative will give the negative of
the maximum of ``F``):
>>> _simplex(a, b, -c, -d)
(-3, [1/2], [2, 0])
The negative of ``F`` and the min of ``f`` are the same. The dual
point `[1/2]` is the value of ``y`` that minimized ``F = c*y - d``
under constraints a*x <= b``:
>>> y = Matrix(['y'])
>>> (c*y - d)[0]
4*y + 1
>>> [i <= j for i, j in zip(a*y,b)]
[2*y <= 1, y <= 1]
In this 1-dimensional dual system, the more restrictive constraint is
the first which limits ``y`` between 0 and 1/2 and the maximum of
``F`` is attained at the nonzero value, hence is ``4*(1/2) + 1 = 3``.
In this case the values for ``x`` and ``y`` were the same when the
dual representation was solved. This is not always the case (though
the value of the function will be the same).
>>> l = [[1, 1], [-1, 1], [0, 1], [-1, 0]], [5, 1, 2, -1], [[1, 1]], [-1]
>>> A, B, C, D = [Matrix(i) for i in l]
>>> _simplex(A, B, -C, -D)
(-6, [3, 2], [1, 0, 0, 0])
>>> _simplex(A, B, -C, -D, dual=True) # [5, 0] != [3, 2]
(-6, [1, 0, 0, 0], [5, 0])
In both cases the function has the same value:
>>> Matrix(C)*Matrix([3, 2]) == Matrix(C)*Matrix([5, 0])
True
See Also
========
_lp - poses min/max problem in form compatible with _simplex
lpmin - minimization which calls _lp
lpmax - maximimzation which calls _lp
References
==========
.. [1] Thomas S. Ferguson, LINEAR PROGRAMMING: A Concise Introduction
web.tecnico.ulisboa.pt/mcasquilho/acad/or/ftp/FergusonUCLA_lp.pdf
"""
A, B, C, D = [Matrix(i) for i in (A, B, C, D or [0])]
if dual:
_o, d, p = _simplex(-A.T, C.T, B.T, -D)
return -_o, d, p
if A and B:
M = Matrix([[A, B], [C, D]])
else:
if A or B:
raise ValueError("must give A and B")
# no constraints given
M = Matrix([[C, D]])
n = M.cols - 1
m = M.rows - 1
if not all(i.is_Float or i.is_Rational for i in M):
# with literal Float and Rational we are guaranteed the
# ability of determining whether an expression is 0 or not
raise TypeError(filldedent("""
Only rationals and floats are allowed.
"""
)
)
# x variables have priority over y variables during Bland's rule
# since False < True
X = [(False, j) for j in range(n)]
Y = [(True, i) for i in range(m)]
# Phase 1: find a feasible solution or determine none exist
while True:
B = M[:-1, -1]
A = M[:-1, :-1]
if all(B[i] >= 0 for i in range(B.rows)):
# We have found a feasible solution
break
# Find k: first row with a negative rightmost entry
for k in range(B.rows):
if B[k] < 0:
break # use current value of k below
else:
pass # error will raise below
# Choose pivot column, c
piv_cols = [_ for _ in range(A.cols) if A[k, _] < 0]
if not piv_cols:
raise InfeasibleLPError(filldedent("""
The constraint set is empty!"""))
_, c = min((X[i], i) for i in piv_cols) # Bland's rule
# Choose pivot row, r
piv_rows = [_ for _ in range(A.rows) if A[_, c] > 0 and B[_] >= 0]
piv_rows.append(k)
r = _choose_pivot_row(A, B, piv_rows, c, Y)
M = _pivot(M, r, c)
X[c], Y[r] = Y[r], X[c]
# Phase 2: from a feasible solution, pivot to optimal
while True:
B = M[:-1, -1]
A = M[:-1, :-1]
C = M[-1, :-1]
# Choose a pivot column, c
piv_cols = [_ for _ in range(n) if C[_] < 0]
if not piv_cols:
break
_, c = min((X[i], i) for i in piv_cols) # Bland's rule
# Choose a pivot row, r
piv_rows = [_ for _ in range(m) if A[_, c] > 0]
if not piv_rows:
raise UnboundedLPError(filldedent("""
Objective function can assume
arbitrarily large values!"""))
r = _choose_pivot_row(A, B, piv_rows, c, Y)
M = _pivot(M, r, c)
X[c], Y[r] = Y[r], X[c]
argmax = [None] * n
argmin_dual = [None] * m
for i, (v, n) in enumerate(X):
if v == False:
argmax[n] = S.Zero
else:
argmin_dual[n] = M[-1, i]
for i, (v, n) in enumerate(Y):
if v == True:
argmin_dual[n] = S.Zero
else:
argmax[n] = M[i, -1]
return -M[-1, -1], argmax, argmin_dual
## routines that use _simplex or support those that do
def _abcd(M, list=False):
"""return parts of M as matrices or lists
Examples
========
>>> from sympy import Matrix
>>> from sympy.solvers.simplex import _abcd
>>> m = Matrix(3, 3, range(9)); m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a, b, c, d = _abcd(m)
>>> a
Matrix([
[0, 1],
[3, 4]])
>>> b
Matrix([
[2],
[5]])
>>> c
Matrix([[6, 7]])
>>> d
Matrix([[8]])
The matrices can be returned as compact lists, too:
>>> L = a, b, c, d = _abcd(m, list=True); L
([[0, 1], [3, 4]], [2, 5], [[6, 7]], [8])
"""
def aslist(i):
l = i.tolist()
if len(l[0]) == 1: # col vector
return [i[0] for i in l]
return l
m = M[:-1, :-1], M[:-1, -1], M[-1, :-1], M[-1:, -1:]
if not list:
return m
return tuple([aslist(i) for i in m])
def _m(a, b, c, d=None):
"""return Matrix([[a, b], [c, d]]) from matrices
in Matrix or list form.
Examples
========
>>> from sympy import Matrix
>>> from sympy.solvers.simplex import _abcd, _m
>>> m = Matrix(3, 3, range(9))
>>> L = _abcd(m, list=True); L
([[0, 1], [3, 4]], [2, 5], [[6, 7]], [8])
>>> _abcd(m)
(Matrix([
[0, 1],
[3, 4]]), Matrix([
[2],
[5]]), Matrix([[6, 7]]), Matrix([[8]]))
>>> assert m == _m(*L) == _m(*_)
"""
a, b, c, d = [Matrix(i) for i in (a, b, c, d or [0])]
return Matrix([[a, b], [c, d]])
def _primal_dual(M, factor=True):
"""return primal and dual function and constraints
assuming that ``M = Matrix([[A, b], [c, d]])`` and the
function ``c*x - d`` is being minimized with ``Ax >= b``
for nonnegative values of ``x``. The dual and its
constraints will be for maximizing `b.T*y - d` subject
to ``A.T*y <= c.T``.
Examples
========
>>> from sympy.solvers.simplex import _primal_dual, lpmin, lpmax
>>> from sympy import Matrix
The following matrix represents the primal task of
minimizing x + y + 7 for y >= x + 1 and y >= -2*x + 3.
The dual task seeks to maximize x + 3*y + 7 with
2*y - x <= 1 and and x + y <= 1:
>>> M = Matrix([
... [-1, 1, 1],
... [ 2, 1, 3],
... [ 1, 1, -7]])
>>> p, d = _primal_dual(M)
The minimum of the primal and maximum of the dual are the same
(though they occur at different points):
>>> lpmin(*p)
(28/3, {x1: 2/3, x2: 5/3})
>>> lpmax(*d)
(28/3, {y1: 1/3, y2: 2/3})
If the equivalent (but canonical) inequalities are
desired, leave `factor=True`, otherwise the unmodified
inequalities for M will be returned.
>>> m = Matrix([
... [-3, -2, 4, -2],
... [ 2, 0, 0, -2],
... [ 0, 1, -3, 0]])
>>> _primal_dual(m, False) # last condition is 2*x1 >= -2
((x2 - 3*x3,
[-3*x1 - 2*x2 + 4*x3 >= -2, 2*x1 >= -2]),
(-2*y1 - 2*y2,
[-3*y1 + 2*y2 <= 0, -2*y1 <= 1, 4*y1 <= -3]))
>>> _primal_dual(m) # condition now x1 >= -1
((x2 - 3*x3,
[-3*x1 - 2*x2 + 4*x3 >= -2, x1 >= -1]),
(-2*y1 - 2*y2,
[-3*y1 + 2*y2 <= 0, -2*y1 <= 1, 4*y1 <= -3]))
If you pass the transpose of the matrix, the primal will be
identified as the standard minimization problem and the
dual as the standard maximization:
>>> _primal_dual(m.T)
((-2*x1 - 2*x2,
[-3*x1 + 2*x2 >= 0, -2*x1 >= 1, 4*x1 >= -3]),
(y2 - 3*y3,
[-3*y1 - 2*y2 + 4*y3 <= -2, y1 <= -1]))
A matrix must have some size or else None will be returned for
the functions:
>>> _primal_dual(Matrix([[1, 2]]))
((x1 - 2, []), (-2, []))
>>> _primal_dual(Matrix([]))
((None, []), (None, []))
References
==========
.. [1] David Galvin, Relations between Primal and Dual
www3.nd.edu/~dgalvin1/30210/30210_F07/presentations/dual_opt.pdf
"""
if not M:
return (None, []), (None, [])
if not hasattr(M, "shape"):
if len(M) not in (3, 4):
raise ValueError("expecting Matrix or 3 or 4 lists")
M = _m(*M)
m, n = [i - 1 for i in M.shape]
A, b, c, d = _abcd(M)
d = d[0]
_ = lambda x: numbered_symbols(x, start=1)
x = Matrix([i for i, j in zip(_("x"), range(n))])
yT = Matrix([i for i, j in zip(_("y"), range(m))]).T
def ineq(L, r, op):
rv = []
for r in (op(i, j) for i, j in zip(L, r)):
if r == True:
continue
elif r == False:
return [False]
if factor:
f = factor_terms(r)
if f.lhs.is_Mul and f.rhs % f.lhs.args[0] == 0:
assert len(f.lhs.args) == 2, f.lhs
k = f.lhs.args[0]
r = r.func(sign(k) * f.lhs.args[1], f.rhs // abs(k))
rv.append(r)
return rv
eq = lambda x, d: x[0] - d if x else -d
F = eq(c * x, d)
f = eq(yT * b, d)
return (F, ineq(A * x, b, Ge)), (f, ineq(yT * A, c, Le))
def _rel_as_nonpos(constr, syms):
"""return `(np, d, aux)` where `np` is a list of nonpositive
expressions that represent the given constraints (possibly
rewritten in terms of auxilliary variables) expressible with
nonnegative symbols, and `d` is a dictionary mapping a given
symbols to an expression with an auxilliary variable. In some
cases a symbol will be used as part of the change of variables,
e.g. x: x - z1 instead of x: z1 - z2.
If any constraint is False/empty, return None. All variables in
``constr`` are assumed to be unbounded unless explicitly indicated
otherwise with a univariate constraint, e.g. ``x >= 0`` will
restrict ``x`` to nonnegative values.
The ``syms`` must be included so all symbols can be given an
unbounded assumption if they are not otherwise bound with
univariate conditions like ``x <= 3``.
Examples
========
>>> from sympy.solvers.simplex import _rel_as_nonpos
>>> from sympy.abc import x, y
>>> _rel_as_nonpos([x >= y, x >= 0, y >= 0], (x, y))
([-x + y], {}, [])
>>> _rel_as_nonpos([x >= 3, x <= 5], [x])
([_z1 - 2], {x: _z1 + 3}, [_z1])
>>> _rel_as_nonpos([x <= 5], [x])
([], {x: 5 - _z1}, [_z1])
>>> _rel_as_nonpos([x >= 1], [x])
([], {x: _z1 + 1}, [_z1])
"""
r = {} # replacements to handle change of variables
np = [] # nonpositive expressions
aux = [] # auxilliary symbols added
ui = numbered_symbols("z", start=1, cls=Dummy) # auxilliary symbols
univariate = {} # {x: interval} for univariate constraints
unbound = [] # symbols designated as unbound
syms = set(syms) # the expected syms of the system
# separate out univariates
for i in constr:
if i == True:
continue # ignore
if i == False:
return # no solution
if i.has(S.Infinity, S.NegativeInfinity):
raise ValueError("only finite bounds are permitted")
if isinstance(i, (Le, Ge)):
i = i.lts - i.gts
freei = i.free_symbols
if freei - syms:
raise ValueError(
"unexpected symbol(s) in constraint: %s" % (freei - syms)
)
if len(freei) > 1:
np.append(i)
elif freei:
x = freei.pop()
if x in unbound:
continue # will handle later
ivl = Le(i, 0, evaluate=False).as_set()
if x not in univariate:
univariate[x] = ivl
else:
univariate[x] &= ivl
elif i:
return False
else:
raise TypeError(filldedent("""
only equalities like Eq(x, y) or non-strict
inequalities like x >= y are allowed in lp, not %s""" % i))
# introduce auxilliary variables as needed for univariate
# inequalities
for x in syms:
i = univariate.get(x, True)
if not i:
return None # no solution possible
if i == True:
unbound.append(x)
continue
a, b = i.inf, i.sup
if a.is_infinite:
u = next(ui)
r[x] = b - u
aux.append(u)
elif b.is_infinite:
if a:
u = next(ui)
r[x] = a + u
aux.append(u)
else:
# standard nonnegative relationship
pass
else:
u = next(ui)
aux.append(u)
# shift so u = x - a => x = u + a
r[x] = u + a
# add constraint for u <= b - a
# since when u = b-a then x = u + a = b - a + a = b:
# the upper limit for x
np.append(u - (b - a))
# make change of variables for unbound variables
for x in unbound:
u = next(ui)
r[x] = u - x # reusing x
aux.append(u)
return np, r, aux
def _lp_matrices(objective, constraints):
"""return A, B, C, D, r, x+X, X for maximizing
objective = Cx - D with constraints Ax <= B, introducing
introducing auxilliary variables, X, as necessary to make
replacements of symbols as given in r, {xi: expression with Xj},
so all variables in x+X will take on nonnegative values.
Every univariate condition creates a semi-infinite
condition, e.g. a single ``x <= 3`` creates the
interval ``[-oo, 3]`` while ``x <= 3`` and ``x >= 2``
create an interval ``[2, 3]``. Variables not in a univariate
expression will take on nonnegative values.
"""
# sympify input and collect free symbols
F = sympify(objective)
np = [sympify(i) for i in constraints]
syms = set.union(*[i.free_symbols for i in [F] + np], set())
# change Eq(x, y) to x - y <= 0 and y - x <= 0
for i in range(len(np)):
if isinstance(np[i], Eq):
np[i] = np[i].lhs - np[i].rhs <= 0
np.append(-np[i].lhs <= 0)
# convert constraints to nonpositive expressions
_ = _rel_as_nonpos(np, syms)
if _ is None:
raise InfeasibleLPError(filldedent("""
Inconsistent/False constraint"""))
np, r, aux = _
# do change of variables
F = F.xreplace(r)
np = [i.xreplace(r) for i in np]
# convert to matrices
xx = list(ordered(syms)) + aux
A, B = linear_eq_to_matrix(np, xx)
C, D = linear_eq_to_matrix([F], xx)
return A, B, C, D, r, xx, aux
def _lp(min_max, f, constr):
"""Return the optimization (min or max) of ``f`` with the given
constraints. All variables are unbounded unless constrained.
If `min_max` is 'max' then the results corresponding to the
maximization of ``f`` will be returned, else the minimization.
The constraints can be given as Le, Ge or Eq expressions.
Examples
========
>>> from sympy.solvers.simplex import _lp as lp
>>> from sympy import Eq
>>> from sympy.abc import x, y, z
>>> f = x + y - 2*z
>>> c = [7*x + 4*y - 7*z <= 3, 3*x - y + 10*z <= 6]
>>> c += [i >= 0 for i in (x, y, z)]
>>> lp(min, f, c)
(-6/5, {x: 0, y: 0, z: 3/5})
By passing max, the maximum value for f under the constraints
is returned (if possible):
>>> lp(max, f, c)
(3/4, {x: 0, y: 3/4, z: 0})
Constraints that are equalities will require that the solution
also satisfy them:
>>> lp(max, f, c + [Eq(y - 9*x, 1)])
(5/7, {x: 0, y: 1, z: 1/7})
All symbols are reported, even if they are not in the objective
function:
>>> lp(min, x, [y + x >= 3, x >= 0])
(0, {x: 0, y: 3})
"""
# get the matrix components for the system expressed
# in terms of only nonnegative variables
A, B, C, D, r, xx, aux = _lp_matrices(f, constr)
how = str(min_max).lower()
if "max" in how:
# _simplex minimizes for Ax <= B so we
# have to change the sign of the function
# and negate the optimal value returned
_o, p, d = _simplex(A, B, -C, -D)
o = -_o
elif "min" in how:
o, p, d = _simplex(A, B, C, D)
else:
raise ValueError("expecting min or max")
# restore original variables and remove aux from p
p = dict(zip(xx, p))
if r: # p has original symbols and auxilliary symbols
# if r has x: x - z1 use values from p to update
r = {k: v.xreplace(p) for k, v in r.items()}
# then use the actual value of x (= x - z1) in p
p.update(r)
# don't show aux
p = {k: p[k] for k in ordered(p) if k not in aux}
# not returning dual since there may be extra constraints
# when a variable has finite bounds
return o, p
def lpmin(f, constr):
"""return minimum of linear equation ``f`` under
linear constraints expressed using Ge, Le or Eq.
All variables are unbounded unless constrained.
Examples
========
>>> from sympy.solvers.simplex import lpmin
>>> from sympy import Eq
>>> from sympy.abc import x, y
>>> lpmin(x, [2*x - 3*y >= -1, Eq(x + 3*y, 2), x <= 2*y])
(1/3, {x: 1/3, y: 5/9})
Negative values for variables are permitted unless explicitly
excluding, so minimizing ``x`` for ``x <= 3`` is an
unbounded problem while the following has a bounded solution:
>>> lpmin(x, [x >= 0, x <= 3])
(0, {x: 0})
Without indicating that ``x`` is nonnegative, there
is no minimum for this objective:
>>> lpmin(x, [x <= 3])
Traceback (most recent call last):
...
sympy.solvers.simplex.UnboundedLPError:
Objective function can assume arbitrarily large values!
See Also
========
linprog, lpmax
"""
return _lp(min, f, constr)
def lpmax(f, constr):
"""return maximum of linear equation ``f`` under
linear constraints expressed using Ge, Le or Eq.
All variables are unbounded unless constrained.
Examples
========
>>> from sympy.solvers.simplex import lpmax
>>> from sympy import Eq
>>> from sympy.abc import x, y
>>> lpmax(x, [2*x - 3*y >= -1, Eq(x+ 3*y,2), x <= 2*y])
(4/5, {x: 4/5, y: 2/5})
Negative values for variables are permitted unless explicitly
excluding:
>>> lpmax(x, [x <= -1])
(-1, {x: -1})
If a non-negative constraint is added for x, there is no
possible solution:
>>> lpmax(x, [x <= -1, x >= 0])
Traceback (most recent call last):
...
sympy.solvers.simplex.InfeasibleLPError: inconsistent/False constraint
See Also
========
linprog, lpmin
"""
return _lp(max, f, constr)
def _handle_bounds(bounds):
# introduce auxiliary variables as needed for univariate
# inequalities
def _make_list(length: int, index_value_pairs):
li = [0] * length
for idx, val in index_value_pairs:
li[idx] = val
return li
unbound = []
row = []
row2 = []
b_len = len(bounds)
for x, (a, b) in enumerate(bounds):
if a is None and b is None:
unbound.append(x)
elif a is None:
# r[x] = b - u
b_len += 1
row.append(_make_list(b_len, [(x, 1), (-1, 1)]))
row.append(_make_list(b_len, [(x, -1), (-1, -1)]))
row2.extend([[b], [-b]])
elif b is None:
if a:
# r[x] = a + u
b_len += 1
row.append(_make_list(b_len, [(x, 1), (-1, -1)]))
row.append(_make_list(b_len, [(x, -1), (-1, 1)]))
row2.extend([[a], [-a]])
else:
# standard nonnegative relationship
pass
else:
# r[x] = u + a
b_len += 1
row.append(_make_list(b_len, [(x, 1), (-1, -1)]))
row.append(_make_list(b_len, [(x, -1), (-1, 1)]))
# u <= b - a
row.append(_make_list(b_len, [(-1, 1)]))
row2.extend([[a], [-a], [b - a]])
# make change of variables for unbound variables
for x in unbound:
# r[x] = u - v
b_len += 2
row.append(_make_list(b_len, [(x, 1), (-1, 1), (-2, -1)]))
row.append(_make_list(b_len, [(x, -1), (-1, -1), (-2, 1)]))
row2.extend([[0], [0]])
return Matrix([r + [0]*(b_len - len(r)) for r in row]), Matrix(row2)
def linprog(c, A=None, b=None, A_eq=None, b_eq=None, bounds=None):
"""Return the minimization of ``c*x`` with the given
constraints ``A*x <= b`` and ``A_eq*x = b_eq``. Unless bounds
are given, variables will have nonnegative values in the solution.
If ``A`` is not given, then the dimension of the system will
be determined by the length of ``C``.
By default, all variables will be nonnegative. If ``bounds``
is given as a single tuple, ``(lo, hi)``, then all variables
will be constrained to be between ``lo`` and ``hi``. Use
None for a ``lo`` or ``hi`` if it is unconstrained in the
negative or positive direction, respectively, e.g.
``(None, 0)`` indicates nonpositive values. To set
individual ranges, pass a list with length equal to the
number of columns in ``A``, each element being a tuple; if
only a few variables take on non-default values they can be
passed as a dictionary with keys giving the corresponding
column to which the variable is assigned, e.g. ``bounds={2:
(1, 4)}`` would limit the 3rd variable to have a value in
range ``[1, 4]``.
Examples
========
>>> from sympy.solvers.simplex import linprog
>>> from sympy import symbols, Eq, linear_eq_to_matrix as M, Matrix
>>> x = x1, x2, x3, x4 = symbols('x1:5')
>>> X = Matrix(x)
>>> c, d = M(5*x2 + x3 + 4*x4 - x1, x)
>>> a, b = M([5*x2 + 2*x3 + 5*x4 - (x1 + 5)], x)
>>> aeq, beq = M([Eq(3*x2 + x4, 2), Eq(-x1 + x3 + 2*x4, 1)], x)
>>> constr = [i <= j for i,j in zip(a*X, b)]
>>> constr += [Eq(i, j) for i,j in zip(aeq*X, beq)]
>>> linprog(c, a, b, aeq, beq)
(9/2, [0, 1/2, 0, 1/2])
>>> assert all(i.subs(dict(zip(x, _[1]))) for i in constr)
See Also
========
lpmin, lpmax
"""
## the objective
C = Matrix(c)
if C.rows != 1 and C.cols == 1:
C = C.T
if C.rows != 1:
raise ValueError("C must be a single row.")
## the inequalities
if not A:
if b:
raise ValueError("A and b must both be given")
# the governing equations will be simple constraints
# on variables
A, b = zeros(0, C.cols), zeros(C.cols, 1)
else:
A, b = [Matrix(i) for i in (A, b)]
if A.cols != C.cols:
raise ValueError("number of columns in A and C must match")
## the equalities
if A_eq is None:
if b_eq is not None:
raise ValueError("A_eq and b_eq must both be given")
else:
A_eq, b_eq = [Matrix(i) for i in (A_eq, b_eq)]
# if x == y then x <= y and x >= y (-x <= -y)
A = A.col_join(A_eq)
A = A.col_join(-A_eq)
b = b.col_join(b_eq)
b = b.col_join(-b_eq)
if not (bounds is None or bounds == {} or bounds == (0, None)):
## the bounds are interpreted
if type(bounds) is tuple and len(bounds) == 2:
bounds = [bounds] * A.cols
elif len(bounds) == A.cols and all(
type(i) is tuple and len(i) == 2 for i in bounds):
pass # individual bounds
elif type(bounds) is dict and all(
type(i) is tuple and len(i) == 2
for i in bounds.values()):
# sparse bounds
db = bounds
bounds = [(0, None)] * A.cols
while db:
i, j = db.popitem()
bounds[i] = j # IndexError if out-of-bounds indices
else:
raise ValueError("unexpected bounds %s" % bounds)
A_, b_ = _handle_bounds(bounds)
aux = A_.cols - A.cols
if A:
A = Matrix([[A, zeros(A.rows, aux)], [A_]])
b = b.col_join(b_)
else:
A = A_
b = b_
C = C.row_join(zeros(1, aux))
else:
aux = -A.cols # set so -aux will give all cols below
o, p, d = _simplex(A, b, C)
return o, p[:-aux] # don't include aux values
def show_linprog(c, A=None, b=None, A_eq=None, b_eq=None, bounds=None):
from sympy import symbols
## the objective
C = Matrix(c)
if C.rows != 1 and C.cols == 1:
C = C.T
if C.rows != 1:
raise ValueError("C must be a single row.")
## the inequalities
if not A:
if b:
raise ValueError("A and b must both be given")
# the governing equations will be simple constraints
# on variables
A, b = zeros(0, C.cols), zeros(C.cols, 1)
else:
A, b = [Matrix(i) for i in (A, b)]
if A.cols != C.cols:
raise ValueError("number of columns in A and C must match")
## the equalities
if A_eq is None:
if b_eq is not None:
raise ValueError("A_eq and b_eq must both be given")
else:
A_eq, b_eq = [Matrix(i) for i in (A_eq, b_eq)]
if not (bounds is None or bounds == {} or bounds == (0, None)):
## the bounds are interpreted
if type(bounds) is tuple and len(bounds) == 2:
bounds = [bounds] * A.cols
elif len(bounds) == A.cols and all(
type(i) is tuple and len(i) == 2 for i in bounds):
pass # individual bounds
elif type(bounds) is dict and all(
type(i) is tuple and len(i) == 2
for i in bounds.values()):
# sparse bounds
db = bounds
bounds = [(0, None)] * A.cols
while db:
i, j = db.popitem()
bounds[i] = j # IndexError if out-of-bounds indices
else:
raise ValueError("unexpected bounds %s" % bounds)
x = Matrix(symbols('x1:%s' % (A.cols+1)))
f,c = (C*x)[0], [i<=j for i,j in zip(A*x, b)] + [Eq(i,j) for i,j in zip(A_eq*x,b_eq)]
for i, (lo, hi) in enumerate(bounds):
if lo is not None:
c.append(x[i]>=lo)
if hi is not None:
c.append(x[i]<=hi)
return f,c
| InfeasibleLPError |
python | bokeh__bokeh | tests/unit/bokeh/test_settings.py | {
"start": 2040,
"end": 4157
} | class ____:
def test_standard_settings(self) -> None:
settings = [k for k,v in bs.settings.__class__.__dict__.items() if isinstance(v, bs.PrioritizedSetting)]
assert set(settings) == set(_expected_settings)
@pytest.mark.parametrize("name", _expected_settings)
def test_prefix(self, name: str) -> None:
ps = getattr(bs.settings, name)
assert ps.env_var.startswith("BOKEH_")
@pytest.mark.parametrize("name", _expected_settings)
def test_parent(self, name: str) -> None:
ps = getattr(bs.settings, name)
assert ps._parent == bs.settings
def test_types(self) -> None:
assert bs.settings.ignore_filename.convert_type == "Bool"
assert bs.settings.minified.convert_type == "Bool"
assert bs.settings.perform_document_validation.convert_type == "Bool"
assert bs.settings.simple_ids.convert_type == "Bool"
assert bs.settings.xsrf_cookies.convert_type == "Bool"
assert bs.settings.default_server_port.convert_type == "Int"
assert bs.settings.compression_level.convert_type == "Compression Level (0-9)"
assert bs.settings.py_log_level.convert_type == "Log Level"
assert bs.settings.validation_level.convert_type == "Validation Level"
assert bs.settings.allowed_ws_origin.convert_type == "List[String]"
assert bs.settings.ico_path.convert_type == "Ico Path"
default_typed = set(_expected_settings) - {
'allowed_ws_origin',
'compression_level',
'default_server_port',
'ico_path',
'ignore_filename',
'minified',
'perform_document_validation',
'py_log_level',
'simple_ids',
'validation_level',
'xsrf_cookies',
}
for name in default_typed:
ps = getattr(bs.settings, name)
assert ps.convert_type == "String"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
| TestSettings |
python | kubernetes-client__python | kubernetes/client/models/v1_validating_admission_policy_binding.py | {
"start": 383,
"end": 7040
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ValidatingAdmissionPolicyBindingSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1ValidatingAdmissionPolicyBinding - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ValidatingAdmissionPolicyBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ValidatingAdmissionPolicyBinding.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:return: The metadata of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ValidatingAdmissionPolicyBinding.
:param metadata: The metadata of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:return: The spec of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:rtype: V1ValidatingAdmissionPolicyBindingSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1ValidatingAdmissionPolicyBinding.
:param spec: The spec of this V1ValidatingAdmissionPolicyBinding. # noqa: E501
:type: V1ValidatingAdmissionPolicyBindingSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ValidatingAdmissionPolicyBinding):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ValidatingAdmissionPolicyBinding):
return True
return self.to_dict() != other.to_dict()
| V1ValidatingAdmissionPolicyBinding |
python | astropy__astropy | astropy/coordinates/tests/test_representation_arithmetic.py | {
"start": 18104,
"end": 24655
} | class ____:
@staticmethod
def check_unit_vectors(e):
for v in e.values():
assert type(v) is CartesianRepresentation
assert_quantity_allclose(v.norm(), 1.0 * u.one)
return e
@staticmethod
def check_scale_factors(sf, rep):
unit = rep.norm().unit
for c, f in sf.items():
assert type(f) is u.Quantity
assert (f.unit * getattr(rep, c).unit).is_equivalent(unit)
def test_spherical(self):
s = SphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle,
lat=[0.0, -30.0, 85.0] * u.deg,
distance=[1, 2, 3] * u.kpc,
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_lon = s + s.distance * 1e-5 * np.cos(s.lat) * e["lon"]
assert_quantity_allclose(s_lon.lon, s.lon + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.distance, s.distance)
s_lon2 = s + 1e-5 * u.radian * sf["lon"] * e["lon"]
assert_representation_allclose(s_lon2, s_lon)
s_lat = s + s.distance * 1e-5 * e["lat"]
assert_quantity_allclose(s_lat.lon, s.lon)
assert_quantity_allclose(s_lat.lat, s.lat + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.distance, s.distance)
s_lat2 = s + 1.0e-5 * u.radian * sf["lat"] * e["lat"]
assert_representation_allclose(s_lat2, s_lat)
s_distance = s + 1.0 * u.pc * e["distance"]
assert_quantity_allclose(s_distance.lon, s.lon, atol=1e-10 * u.rad)
assert_quantity_allclose(s_distance.lat, s.lat, atol=1e-10 * u.rad)
assert_quantity_allclose(s_distance.distance, s.distance + 1.0 * u.pc)
s_distance2 = s + 1.0 * u.pc * sf["distance"] * e["distance"]
assert_representation_allclose(s_distance2, s_distance)
def test_unit_spherical(self):
s = UnitSphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle, lat=[0.0, -30.0, 85.0] * u.deg
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_lon = s + 1e-5 * np.cos(s.lat) * e["lon"]
assert_quantity_allclose(s_lon.lon, s.lon + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10 * u.rad)
s_lon2 = s + 1e-5 * u.radian * sf["lon"] * e["lon"]
assert_representation_allclose(s_lon2, s_lon)
s_lat = s + 1e-5 * e["lat"]
assert_quantity_allclose(s_lat.lon, s.lon)
assert_quantity_allclose(s_lat.lat, s.lat + 1e-5 * u.rad, atol=1e-10 * u.rad)
s_lat2 = s + 1.0e-5 * u.radian * sf["lat"] * e["lat"]
assert_representation_allclose(s_lat2, s_lat)
def test_radial(self):
r = RadialRepresentation(10.0 * u.kpc)
with pytest.raises(NotImplementedError):
r.unit_vectors()
sf = r.scale_factors()
assert np.all(sf["distance"] == 1.0 * u.one)
assert np.all(r.norm() == r.distance)
with pytest.raises(TypeError):
r + r
def test_physical_spherical(self):
s = PhysicsSphericalRepresentation(
phi=[0.0, 6.0, 21.0] * u.hourangle,
theta=[90.0, 120.0, 5.0] * u.deg,
r=[1, 2, 3] * u.kpc,
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_phi = s + s.r * 1e-5 * np.sin(s.theta) * e["phi"]
assert_quantity_allclose(s_phi.phi, s.phi + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_phi.theta, s.theta, atol=1e-10 * u.rad)
assert_quantity_allclose(s_phi.r, s.r)
s_phi2 = s + 1e-5 * u.radian * sf["phi"] * e["phi"]
assert_representation_allclose(s_phi2, s_phi)
s_theta = s + s.r * 1e-5 * e["theta"]
assert_quantity_allclose(s_theta.phi, s.phi)
assert_quantity_allclose(
s_theta.theta, s.theta + 1e-5 * u.rad, atol=1e-10 * u.rad
)
assert_quantity_allclose(s_theta.r, s.r)
s_theta2 = s + 1.0e-5 * u.radian * sf["theta"] * e["theta"]
assert_representation_allclose(s_theta2, s_theta)
s_r = s + 1.0 * u.pc * e["r"]
assert_quantity_allclose(s_r.phi, s.phi, atol=1e-10 * u.rad)
assert_quantity_allclose(s_r.theta, s.theta, atol=1e-10 * u.rad)
assert_quantity_allclose(s_r.r, s.r + 1.0 * u.pc)
s_r2 = s + 1.0 * u.pc * sf["r"] * e["r"]
assert_representation_allclose(s_r2, s_r)
def test_cartesian(self):
s = CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc
)
e = s.unit_vectors()
sf = s.scale_factors()
for v, expected in zip(
e.values(),
([1.0, 0.0, 0.0] * u.one, [0.0, 1.0, 0.0] * u.one, [0.0, 0.0, 1.0] * u.one),
):
assert np.all(v.get_xyz(xyz_axis=-1) == expected)
for f in sf.values():
assert np.all(f == 1.0 * u.one)
def test_cylindrical(self):
s = CylindricalRepresentation(
rho=[1, 2, 3] * u.pc, phi=[0.0, 90.0, -45.0] * u.deg, z=[3, 4, 5] * u.kpc
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_rho = s + 1.0 * u.pc * e["rho"]
assert_quantity_allclose(s_rho.rho, s.rho + 1.0 * u.pc)
assert_quantity_allclose(s_rho.phi, s.phi)
assert_quantity_allclose(s_rho.z, s.z)
s_rho2 = s + 1.0 * u.pc * sf["rho"] * e["rho"]
assert_representation_allclose(s_rho2, s_rho)
s_phi = s + s.rho * 1e-5 * e["phi"]
assert_quantity_allclose(s_phi.rho, s.rho)
assert_quantity_allclose(s_phi.phi, s.phi + 1e-5 * u.rad)
assert_quantity_allclose(s_phi.z, s.z)
s_phi2 = s + 1e-5 * u.radian * sf["phi"] * e["phi"]
assert_representation_allclose(s_phi2, s_phi)
s_z = s + 1.0 * u.pc * e["z"]
assert_quantity_allclose(s_z.rho, s.rho)
assert_quantity_allclose(s_z.phi, s.phi, atol=1e-10 * u.rad)
assert_quantity_allclose(s_z.z, s.z + 1.0 * u.pc)
s_z2 = s + 1.0 * u.pc * sf["z"] * e["z"]
assert_representation_allclose(s_z2, s_z)
@pytest.mark.parametrize("omit_coslat", [False, True], scope="class")
| TestUnitVectorsAndScales |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 35823,
"end": 41112
} | class ____:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
# User-defined converters which gets set in ascii.ui if a `converter` kwarg
# is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type).
"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
if type(converters) is type:
try:
# Don't allow list-like things that dtype accepts
converters = [np.dtype(converters)]
except TypeError:
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith("cannot unpack"):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError("converter_type must be a subclass of NoType")
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError(
"Error: invalid format for converters, see "
f"documentation\n{converters}: {err}"
)
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
# value of of last_error will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = "no converters defined"
while not hasattr(col, "data"):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f"Column {col.name} failed to convert: {last_err}")
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError(
f"converter type {converter_type.__name__} does not match"
f" column type {col.type.__name__} for column {col.name}"
)
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (OverflowError, TypeError, ValueError) as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
# With python/cpython#95778 this has been supplemented with a
# "ValueError: Exceeds the limit (4300) for integer string conversion"
# so need to catch that as well.
if isinstance(err, OverflowError) or (
isinstance(err, ValueError)
and str(err).startswith("Exceeds the limit")
):
warnings.warn(
f"OverflowError converting to {converter_type.__name__} in"
f" column {col.name}, reverting to String.",
AstropyWarning,
)
col.converters.insert(0, convert_numpy(str))
else:
col.converters.pop(0)
last_err = err
def _deduplicate_names(names: list[str]) -> list[str]:
"""Ensure there are no duplicates in ``names``.
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + "_"
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
| BaseOutputter |
python | coleifer__peewee | tests/mysql_ext.py | {
"start": 2692,
"end": 2842
} | class ____(TestMySQLConnector):
database = mariadb_db
@requires_mysql
@skip_unless(IS_MYSQL_JSON, 'requires MySQL 5.7+ or 8.x')
| TestMariaDBConnector |
python | python-pillow__Pillow | src/PIL/ImageDraw2.py | {
"start": 1391,
"end": 7227
class ____:
    """
    (Experimental) WCK-style drawing interface
    """
    # Wraps a PIL image together with an ImageDraw renderer and an optional
    # 6-tuple affine transform that is applied to coordinates before each
    # drawing operation.
    def __init__(
        self,
        image: Image.Image | str,
        size: tuple[int, int] | list[int] | None = None,
        color: float | tuple[float, ...] | str | None = None,
    ) -> None:
        # ``image`` may be an existing Image or a mode string; a mode string
        # requires an explicit ``size`` so a fresh image can be created.
        if isinstance(image, str):
            if size is None:
                msg = "If image argument is mode string, size must be a list or tuple"
                raise ValueError(msg)
            image = Image.new(image, size, color)
        self.draw = ImageDraw.Draw(image)
        self.image = image
        # Affine transform (a, b, c, d, e, f); None means identity (no
        # transformation applied).
        self.transform: tuple[float, float, float, float, float, float] | None = None
    def flush(self) -> Image.Image:
        # Return the underlying image; no deferred drawing actually occurs.
        return self.image
    def render(
        self,
        op: str,
        xy: Coords,
        pen: Pen | Brush | None,
        brush: Brush | Pen | None = None,
        **kwargs: Any,
    ) -> None:
        # Generic dispatcher: translate Pen/Brush arguments into the
        # outline/fill/width keywords understood by ImageDraw, apply the
        # current transform to ``xy``, then invoke ``self.draw.<op>``.
        # handle color arguments
        outline = fill = None
        width = 1
        # Callers may pass the pen and brush in either order; honor both.
        if isinstance(pen, Pen):
            outline = pen.color
            width = pen.width
        elif isinstance(brush, Pen):
            outline = brush.color
            width = brush.width
        if isinstance(brush, Brush):
            fill = brush.color
        elif isinstance(pen, Brush):
            fill = pen.color
        # handle transformation
        if self.transform:
            path = ImagePath.Path(xy)
            path.transform(self.transform)
            xy = path
        # render the item
        if op in ("arc", "line"):
            # Open shapes are stroked with the outline color via ``fill``.
            kwargs.setdefault("fill", outline)
        else:
            # Closed shapes take both a fill and an outline.
            kwargs.setdefault("fill", fill)
            kwargs.setdefault("outline", outline)
        if op == "line":
            kwargs.setdefault("width", width)
        getattr(self.draw, op)(xy, **kwargs)
    def settransform(self, offset: tuple[float, float]) -> None:
        """Sets a transformation offset."""
        # Only pure translation is supported here; scale/rotation terms are
        # fixed to the identity.
        (xoffset, yoffset) = offset
        self.transform = (1, 0, xoffset, 0, 1, yoffset)
    def arc(
        self,
        xy: Coords,
        pen: Pen | Brush | None,
        start: float,
        end: float,
        *options: Any,
    ) -> None:
        """
        Draws an arc (a portion of a circle outline) between the start and end
        angles, inside the given bounding box.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc`
        """
        # Extra positional ``options`` bind to render()'s ``brush`` parameter.
        self.render("arc", xy, pen, *options, start=start, end=end)
    def chord(
        self,
        xy: Coords,
        pen: Pen | Brush | None,
        start: float,
        end: float,
        *options: Any,
    ) -> None:
        """
        Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points
        with a straight line.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord`
        """
        self.render("chord", xy, pen, *options, start=start, end=end)
    def ellipse(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws an ellipse inside the given bounding box.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse`
        """
        self.render("ellipse", xy, pen, *options)
    def line(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws a line between the coordinates in the ``xy`` list.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line`
        """
        self.render("line", xy, pen, *options)
    def pieslice(
        self,
        xy: Coords,
        pen: Pen | Brush | None,
        start: float,
        end: float,
        *options: Any,
    ) -> None:
        """
        Same as arc, but also draws straight lines between the end points and the
        center of the bounding box.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice`
        """
        self.render("pieslice", xy, pen, *options, start=start, end=end)
    def polygon(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws a polygon.
        The polygon outline consists of straight lines between the given
        coordinates, plus a straight line between the last and the first
        coordinate.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon`
        """
        self.render("polygon", xy, pen, *options)
    def rectangle(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws a rectangle.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle`
        """
        self.render("rectangle", xy, pen, *options)
    def text(self, xy: tuple[float, float], text: AnyStr, font: Font) -> None:
        """
        Draws the string at the given position.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text`
        """
        # NOTE(review): after transforming, ``xy`` is an ImagePath.Path rather
        # than a plain tuple — presumably ImageDraw.text accepts that; confirm.
        if self.transform:
            path = ImagePath.Path(xy)
            path.transform(self.transform)
            xy = path
        self.draw.text(xy, text, font=font.font, fill=font.color)
    def textbbox(
        self, xy: tuple[float, float], text: AnyStr, font: Font
    ) -> tuple[float, float, float, float]:
        """
        Returns bounding box (in pixels) of given text.
        :return: ``(left, top, right, bottom)`` bounding box
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox`
        """
        if self.transform:
            path = ImagePath.Path(xy)
            path.transform(self.transform)
            xy = path
        return self.draw.textbbox(xy, text, font=font.font)
    def textlength(self, text: AnyStr, font: Font) -> float:
        """
        Returns length (in pixels) of given text.
        This is the amount by which following text should be offset.
        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength`
        """
        # Length is transform-independent, so no coordinate handling here.
        return self.draw.textlength(text, font=font.font)
| Draw |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 138828,
"end": 176075
} | class ____(AnyMarkConfig):
"""
BarConfig schema wrapper.
Parameters
----------
align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle : dict, float, :class:`ExprRef`
The rotation angle of the text, in degrees.
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole : str, dict, :class:`ExprRef`
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription : str, dict, :class:`ExprRef`
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect : bool, dict, :class:`ExprRef`
Whether to keep aspect ratio of image marks.
baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
binSpacing : float
Offset between bars for binned field. The ideal value for this is either 0
(preferred by statisticians) or 1 (Vega-Lite default, D3 example style).
**Default value:** ``1``
blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity']
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
continuousBandSize : float
The default size of the bars on continuous scales.
**Default value:** ``5``
cornerRadius : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
cornerRadiusEnd : dict, float, :class:`ExprRef`
* For vertical bars, top-left and top-right corner radius.
* For horizontal bars, top-right and bottom-right corner radius.
cornerRadiusTopLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top right corner.
**Default value:** ``0``
cornerRadiusTopRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top left corner.
**Default value:** ``0``
cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description : str, dict, :class:`ExprRef`
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir : dict, :class:`ExprRef`, :class:`TextDirection`, Literal['ltr', 'rtl']
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
discreteBandSize : dict, float, :class:`RelativeBandSize`
The default size of the bars with discrete dimensions. If unspecified, the default
size is ``step-2``, which provides 2 pixel offset between bars.
dx : dict, float, :class:`ExprRef`
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy : dict, float, :class:`ExprRef`
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis : str, dict, :class:`ExprRef`
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
endAngle : dict, float, :class:`ExprRef`
The end angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity : dict, float, :class:`ExprRef`
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled : bool
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font : str, dict, :class:`ExprRef`
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize : dict, float, :class:`ExprRef`
The font size, in pixels.
**Default value:** ``11``
fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style (e.g., ``"italic"``).
fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height : dict, float, :class:`ExprRef`
Height of the marks.
href : str, dict, :class:`URI`, :class:`ExprRef`
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius : dict, float, :class:`ExprRef`
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit : dict, float, :class:`ExprRef`
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
lineBreak : str, dict, :class:`ExprRef`
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight : dict, float, :class:`ExprRef`
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
minBandSize : dict, float, :class:`ExprRef`
The minimum band size for bar and rectangle marks. **Default value:** ``0.25``
opacity : dict, float, :class:`ExprRef`
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order : bool, None
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius : dict, float, :class:`ExprRef`
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle : dict, float, :class:`ExprRef`
The angular padding applied to sides of the arc, in radians.
radius : dict, float, :class:`ExprRef`
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2 : dict, float, :class:`ExprRef`
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
shape : str, dict, :class:`ExprRef`, :class:`SymbolShape`
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size : dict, float, :class:`ExprRef`
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth : bool, dict, :class:`ExprRef`
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
startAngle : dict, float, :class:`ExprRef`
The start angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square']
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset : dict, float, :class:`ExprRef`
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel']
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit : dict, float, :class:`ExprRef`
The miter limit at which to bevel a line join.
strokeOffset : dict, float, :class:`ExprRef`
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity : dict, float, :class:`ExprRef`
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth : dict, float, :class:`ExprRef`
The stroke width, in pixels.
tension : dict, float, :class:`ExprRef`
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
Placeholder text if the ``text`` channel is not specified
theta : dict, float, :class:`ExprRef`
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : dict, float, :class:`ExprRef`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
time : dict, float, :class:`ExprRef`
timeUnitBandPosition : float
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize : float
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url : str, dict, :class:`URI`, :class:`ExprRef`
The URL of the image file for image marks.
width : dict, float, :class:`ExprRef`
Width of the marks.
x : dict, float, :class:`ExprRef`, Literal['width']
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2 : dict, float, :class:`ExprRef`, Literal['width']
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
y : dict, float, :class:`ExprRef`, Literal['height']
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2 : dict, float, :class:`ExprRef`, Literal['height']
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
"""
_schema = {"$ref": "#/definitions/BarConfig"}
def __init__(
self,
align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
angle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined,
ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined,
aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined,
binSpacing: Optional[float] = Undefined,
blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
continuousBandSize: Optional[float] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusBottomLeft: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusBottomRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusEnd: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined,
discreteBandSize: Optional[float | SchemaBase | Map] = Undefined,
dx: Optional[float | Parameter | SchemaBase | Map] = Undefined,
dy: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined,
endAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fill: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
filled: Optional[bool] = Undefined,
font: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined,
height: Optional[float | Parameter | SchemaBase | Map] = Undefined,
href: Optional[str | Parameter | SchemaBase | Map] = Undefined,
innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
limit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined,
lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
minBandSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
order: Optional[bool | None] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
padAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
shape: Optional[str | Parameter | SchemaBase | Map] = Undefined,
size: Optional[float | Parameter | SchemaBase | Map] = Undefined,
smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
startAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
stroke: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined,
strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
tension: Optional[float | Parameter | SchemaBase | Map] = Undefined,
text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
theta: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
time: Optional[float | Parameter | SchemaBase | Map] = Undefined,
timeUnitBandPosition: Optional[float] = Undefined,
timeUnitBandSize: Optional[float] = Undefined,
tooltip: Optional[
str | bool | float | Parameter | SchemaBase | Map | None
] = Undefined,
url: Optional[str | Parameter | SchemaBase | Map] = Undefined,
width: Optional[float | Parameter | SchemaBase | Map] = Undefined,
x: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
y: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
**kwds,
):
super().__init__(
align=align,
angle=angle,
aria=aria,
ariaRole=ariaRole,
ariaRoleDescription=ariaRoleDescription,
aspect=aspect,
baseline=baseline,
binSpacing=binSpacing,
blend=blend,
color=color,
continuousBandSize=continuousBandSize,
cornerRadius=cornerRadius,
cornerRadiusBottomLeft=cornerRadiusBottomLeft,
cornerRadiusBottomRight=cornerRadiusBottomRight,
cornerRadiusEnd=cornerRadiusEnd,
cornerRadiusTopLeft=cornerRadiusTopLeft,
cornerRadiusTopRight=cornerRadiusTopRight,
cursor=cursor,
description=description,
dir=dir,
discreteBandSize=discreteBandSize,
dx=dx,
dy=dy,
ellipsis=ellipsis,
endAngle=endAngle,
fill=fill,
fillOpacity=fillOpacity,
filled=filled,
font=font,
fontSize=fontSize,
fontStyle=fontStyle,
fontWeight=fontWeight,
height=height,
href=href,
innerRadius=innerRadius,
interpolate=interpolate,
invalid=invalid,
limit=limit,
lineBreak=lineBreak,
lineHeight=lineHeight,
minBandSize=minBandSize,
opacity=opacity,
order=order,
orient=orient,
outerRadius=outerRadius,
padAngle=padAngle,
radius=radius,
radius2=radius2,
shape=shape,
size=size,
smooth=smooth,
startAngle=startAngle,
stroke=stroke,
strokeCap=strokeCap,
strokeDash=strokeDash,
strokeDashOffset=strokeDashOffset,
strokeJoin=strokeJoin,
strokeMiterLimit=strokeMiterLimit,
strokeOffset=strokeOffset,
strokeOpacity=strokeOpacity,
strokeWidth=strokeWidth,
tension=tension,
text=text,
theta=theta,
theta2=theta2,
time=time,
timeUnitBandPosition=timeUnitBandPosition,
timeUnitBandSize=timeUnitBandSize,
tooltip=tooltip,
url=url,
width=width,
x=x,
x2=x2,
y=y,
y2=y2,
**kwds,
)
| BarConfig |
python | mlflow__mlflow | dev/clint/src/clint/rules/lazy_builtin_import.py | {
"start": 36,
"end": 166
} | class ____(Rule):
def _message(self) -> str:
return "Builtin modules must be imported at the top level."
| LazyBuiltinImport |
python | Textualize__textual | docs/examples/guide/screens/modal02.py | {
"start": 498,
"end": 1032
} | class ____(ModalScreen):
"""Screen with a dialog to quit."""
def compose(self) -> ComposeResult:
yield Grid(
Label("Are you sure you want to quit?", id="question"),
Button("Quit", variant="error", id="quit"),
Button("Cancel", variant="primary", id="cancel"),
id="dialog",
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "quit":
self.app.exit()
else:
self.app.pop_screen()
| QuitScreen |
python | django__django | django/middleware/clickjacking.py | {
"start": 272,
"end": 1724
} | class ____(MiddlewareMixin):
"""
Set the X-Frame-Options HTTP header in HTTP responses.
Do not set the header if it's already set or if the response contains
a xframe_options_exempt value set to True.
By default, set the X-Frame-Options header to 'DENY', meaning the response
cannot be displayed in a frame, regardless of the site attempting to do so.
To enable the response to be loaded on a frame within the same site, set
X_FRAME_OPTIONS in your project's Django settings to 'SAMEORIGIN'.
"""
def process_response(self, request, response):
# Don't set it if it's already in the response
if response.get("X-Frame-Options") is not None:
return response
# Don't set it if they used @xframe_options_exempt
if getattr(response, "xframe_options_exempt", False):
return response
response.headers["X-Frame-Options"] = self.get_xframe_options_value(
request,
response,
)
return response
def get_xframe_options_value(self, request, response):
"""
Get the value to set for the X_FRAME_OPTIONS header. Use the value from
the X_FRAME_OPTIONS setting, or 'DENY' if not set.
This method can be overridden if needed, allowing it to vary based on
the request or response.
"""
return getattr(settings, "X_FRAME_OPTIONS", "DENY").upper()
| XFrameOptionsMiddleware |
python | cython__cython | Cython/Compiler/Optimize.py | {
"start": 68312,
"end": 85994
} | class ____(Visitor.EnvTransform):
"""Optimize some common calls to builtin types *before* the type
analysis phase and *after* the declarations analysis phase.
This transform cannot make use of any argument types, but it can
restructure the tree in a way that the type analysis phase can
respond to.
Introducing C function calls here may not be a good idea. Move
them to the OptimizeBuiltinCalls transform instead, which runs
after type analysis.
"""
# only intercept on call nodes
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
return self._dispatch_to_handler(node, function, node.args)
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
return self._dispatch_to_handler(
node, function, args, node.keyword_args)
def _function_is_builtin_name(self, function):
if not function.is_name:
return False
env = self.current_env()
entry = env.lookup(function.name)
if entry is not env.builtin_scope().lookup_here(function.name):
return False
# if entry is None, it's at least an undeclared name, so likely builtin
return True
def _dispatch_to_handler(self, node, function, args, kwargs=None):
if kwargs is None:
handler_name = '_handle_simple_function_%s' % function.name
else:
handler_name = '_handle_general_function_%s' % function.name
handle_call = getattr(self, handler_name, None)
if handle_call is not None:
if kwargs is None:
return handle_call(node, args)
else:
return handle_call(node, args, kwargs)
return node
def _inject_capi_function(self, node, cname, func_type, utility_code=None):
node.function = ExprNodes.PythonCapiFunctionNode(
node.function.pos, node.function.name, cname, func_type,
utility_code = utility_code)
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, str) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
# specific handlers for simple call nodes
def _handle_simple_function_float(self, node, pos_args):
if not pos_args:
return ExprNodes.FloatNode(node.pos, value='0.0')
if len(pos_args) > 1:
self._error_wrong_arg_count('float', node, pos_args, 1)
arg_type = getattr(pos_args[0], 'type', None)
if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
return pos_args[0]
return node
def _handle_simple_function_slice(self, node, pos_args):
arg_count = len(pos_args)
start = step = None
if arg_count == 1:
stop, = pos_args
elif arg_count == 2:
start, stop = pos_args
elif arg_count == 3:
start, stop, step = pos_args
else:
self._error_wrong_arg_count('slice', node, pos_args)
return node
return ExprNodes.SliceNode(
node.pos,
start=start or ExprNodes.NoneNode(node.pos),
stop=stop,
step=step or ExprNodes.NoneNode(node.pos))
def _handle_simple_function_ord(self, node, pos_args):
"""Unpack ord('X').
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
if len(arg.value) == 1:
return ExprNodes.IntNode.for_int(arg.pos, ord(arg.value), type=PyrexTypes.c_long_type)
return node
# sequence processing
def _handle_simple_function_all(self, node, pos_args):
"""Transform
_result = all(p(x) for L in LL for x in L)
into
for L in LL:
for x in L:
if not p(x):
return False
else:
return True
"""
return self._transform_any_all(node, pos_args, False)
def _handle_simple_function_any(self, node, pos_args):
"""Transform
_result = any(p(x) for L in LL for x in L)
into
for L in LL:
for x in L:
if p(x):
return True
else:
return False
"""
return self._transform_any_all(node, pos_args, True)
def _transform_any_all(self, node, pos_args, is_any):
if len(pos_args) != 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
generator_body = gen_expr_node.def_node.gbody
loop_node = generator_body.body
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if is_any:
condition = yield_expression
else:
condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression)
test_node = Nodes.IfStatNode(
yield_expression.pos, else_clause=None, if_clauses=[
Nodes.IfClauseNode(
yield_expression.pos,
condition=condition,
body=Nodes.ReturnStatNode(
node.pos,
value=ExprNodes.BoolNode(yield_expression.pos, value=is_any))
)]
)
loop_node.else_clause = Nodes.ReturnStatNode(
node.pos,
value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any))
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node)
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all')
PySequence_List_func_type = PyrexTypes.CFuncType(
Builtin.list_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_sorted(self, node, pos_args):
"""Transform sorted(genexpr) and sorted([listcomp]) into
[listcomp].sort(). CPython just reads the iterable into a
list and calls .sort() on it. Expanding the iterable in a
listcomp is still faster and the result can be sorted in
place.
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type:
list_node = arg
elif isinstance(arg, ExprNodes.GeneratorExpressionNode):
gen_expr_node = arg
yield_statements = _find_yield_statements(gen_expr_node.loop)
if not yield_statements:
return node
list_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='sorted',
comprehension_type=Builtin.list_type)
for yield_expression, yield_stat_node in yield_statements:
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=list_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
elif arg.is_sequence_constructor:
# sorted([a, b, c]) or sorted((a, b, c)). The result is always a list,
# so starting off with a fresh one is more efficient.
list_node = arg.as_list()
else:
# Interestingly, PySequence_List works on a lot of non-sequence
# things as well.
list_node = ExprNodes.PythonCapiCallNode(
node.pos,
"__Pyx_PySequence_ListKeepNew"
if arg.result_in_temp() and arg.type in (PyrexTypes.py_object_type, Builtin.list_type)
else "PySequence_List",
self.PySequence_List_func_type,
args=pos_args, is_temp=True)
return ExprNodes.SortedListNode(node.pos, list_node)
def __handle_simple_function_sum(self, node, pos_args):
"""Transform sum(genexpr) into an equivalent inlined aggregation loop.
"""
if len(pos_args) not in (1,2):
return node
if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
ExprNodes.ComprehensionNode)):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
# FIXME: currently nonfunctional
yield_expression = None
if yield_expression is None:
return node
else: # ComprehensionNode
yield_stat_node = gen_expr_node.append
yield_expression = yield_stat_node.expr
try:
if not yield_expression.is_literal or not yield_expression.type.is_int:
return node
except AttributeError:
return node # in case we don't have a type yet
# special case: old Py2 backwards compatible "sum([int_const for ...])"
# can safely be unpacked into a genexpr
if len(pos_args) == 1:
start = ExprNodes.IntNode.for_size(node.pos, 0)
else:
start = pos_args[1]
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
add_node = Nodes.SingleAssignmentNode(
yield_expression.pos,
lhs = result_ref,
rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node)
exec_code = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
start.pos,
lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
rhs = start,
first = True),
loop_node
])
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = exec_code, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
has_local_scope = gen_expr_node.has_local_scope)
def _handle_simple_function_min(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '<')
def _handle_simple_function_max(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '>')
def _optimise_min_max(self, node, args, operator):
"""Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
"""
if len(args) <= 1:
if len(args) == 1 and args[0].is_sequence_constructor:
args = args[0].args
if len(args) <= 1:
# leave this to Python
return node
cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))
last_result = args[0]
for arg_node in cascaded_nodes:
result_ref = UtilNodes.ResultRefNode(last_result)
last_result = ExprNodes.CondExprNode(
arg_node.pos,
true_val = arg_node,
false_val = result_ref,
test = ExprNodes.PrimaryCmpNode(
arg_node.pos,
operand1 = arg_node,
operator = operator,
operand2 = result_ref,
)
)
last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
for ref_node in cascaded_nodes[::-1]:
last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
return last_result
# builtin type creation
def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
if not pos_args:
return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
# This is a bit special - for iterables (including genexps),
# Python actually overallocates and resizes a newly created
# tuple incrementally while reading items, which we can't
# easily do without explicit node support. Instead, we read
# the items into a list and then copy them into a tuple of the
# final size. This takes up to twice as much memory, but will
# have to do until we have real support for genexps.
result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
if result is not node:
return ExprNodes.AsTupleNode(node.pos, arg=result)
return node
def _handle_simple_function_frozenset(self, node, pos_args):
"""Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
"""
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor and not pos_args[0].args:
del pos_args[0]
elif isinstance(pos_args[0], ExprNodes.ListNode):
pos_args[0] = pos_args[0].as_tuple()
return node
def _handle_simple_function_list(self, node, pos_args):
if not pos_args:
return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
def _handle_simple_function_set(self, node, pos_args):
if not pos_args:
return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)
def _transform_list_set_genexpr(self, node, pos_args, target_type):
"""Replace set(genexpr) and list(genexpr) by an inlined comprehension.
"""
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_statements = _find_yield_statements(loop_node)
if not yield_statements:
return node
result_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node,
orig_func='set' if target_type is Builtin.set_type else 'list',
comprehension_type=target_type)
for yield_expression, yield_stat_node in yield_statements:
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=result_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
return result_node
def _handle_simple_function_dict(self, node, pos_args):
"""Replace dict( (a,b) for ... ) by an inlined { a:b for ... }
"""
if len(pos_args) == 0:
return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_statements = _find_yield_statements(loop_node)
if not yield_statements:
return node
for yield_expression, _ in yield_statements:
if not isinstance(yield_expression, ExprNodes.TupleNode):
return node
if len(yield_expression.args) != 2:
return node
result_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='dict',
comprehension_type=Builtin.dict_type)
for yield_expression, yield_stat_node in yield_statements:
append_node = ExprNodes.DictComprehensionAppendNode(
yield_expression.pos,
key_expr=yield_expression.args[0],
value_expr=yield_expression.args[1],
target=result_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
return result_node
# specific handlers for general call nodes
def _handle_general_function_dict(self, node, pos_args, kwargs):
"""Replace dict(a=b,c=d,...) by the underlying keyword dict
construction which is done anyway.
"""
if len(pos_args) > 0:
return node
if not isinstance(kwargs, ExprNodes.DictNode):
return node
return kwargs
| EarlyReplaceBuiltinCalls |
python | tornadoweb__tornado | tornado/test/simple_httpclient_test.py | {
"start": 28870,
"end": 29806
} | class ____(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([("/small", SmallHeaders), ("/large", LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_header_size=1024)
def test_small_headers(self):
response = self.fetch("/small")
response.rethrow()
self.assertEqual(response.body, b"ok")
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
with self.assertRaises(UnsatisfiableReadError):
self.fetch("/large", raise_error=True)
| MaxHeaderSizeTest |
python | PyCQA__bandit | examples/xml_sax.py | {
"start": 58,
"end": 1043
} | class ____(xml.sax.ContentHandler):
def __init__(self):
xml.sax.ContentHandler.__init__(self)
def startElement(self, name, attrs):
print('start:', name)
def endElement(self, name):
print('end:', name)
def characters(self, content):
print('chars:', content)
def main():
xmlString = "<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend!</body>\n</note>"
# bad
xml.sax.parseString(xmlString, ExampleContentHandler())
xml.sax.parse('notaxmlfilethatexists.xml', ExampleContentHandler())
sax.parseString(xmlString, ExampleContentHandler())
sax.parse('notaxmlfilethatexists.xml', ExampleContentHandler)
# good
defusedxml.sax.parseString(xmlString, ExampleContentHandler())
# bad
xml.sax.make_parser()
sax.make_parser()
print('nothing')
# good
defusedxml.sax.make_parser()
if __name__ == "__main__":
main()
| ExampleContentHandler |
python | apache__avro | lang/py/avro/compatibility.py | {
"start": 1646,
"end": 2185
} | class ____(Enum):
name_mismatch = "name_mismatch"
fixed_size_mismatch = "fixed_size_mismatch"
missing_enum_symbols = "missing_enum_symbols"
reader_field_missing_default_value = "reader_field_missing_default_value"
type_mismatch = "type_mismatch"
missing_union_branch = "missing_union_branch"
PRIMITIVE_TYPES = {
SchemaType.NULL,
SchemaType.BOOLEAN,
SchemaType.INT,
SchemaType.LONG,
SchemaType.FLOAT,
SchemaType.DOUBLE,
SchemaType.BYTES,
SchemaType.STRING,
}
| SchemaIncompatibilityType |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_batch.py | {
"start": 4958,
"end": 8421
} | class ____:
@mock.patch.object(BatchClientHook, "client")
def test_poke_no_environment(
self, mock_batch_client, batch_compute_environment_sensor: BatchComputeEnvironmentSensor
):
mock_batch_client.describe_compute_environments.return_value = {"computeEnvironments": []}
with pytest.raises(AirflowException) as ctx:
batch_compute_environment_sensor.poke({})
mock_batch_client.describe_compute_environments.assert_called_once_with(
computeEnvironments=[ENVIRONMENT_NAME],
)
assert "not found" in str(ctx.value)
@mock.patch.object(BatchClientHook, "client")
def test_poke_valid(
self, mock_batch_client, batch_compute_environment_sensor: BatchComputeEnvironmentSensor
):
mock_batch_client.describe_compute_environments.return_value = {
"computeEnvironments": [{"status": "VALID"}]
}
assert batch_compute_environment_sensor.poke({}) is True
mock_batch_client.describe_compute_environments.assert_called_once_with(
computeEnvironments=[ENVIRONMENT_NAME],
)
@mock.patch.object(BatchClientHook, "client")
def test_poke_running(
self, mock_batch_client, batch_compute_environment_sensor: BatchComputeEnvironmentSensor
):
mock_batch_client.describe_compute_environments.return_value = {
"computeEnvironments": [
{
"status": "CREATING",
}
]
}
assert batch_compute_environment_sensor.poke({}) is False
mock_batch_client.describe_compute_environments.assert_called_once_with(
computeEnvironments=[ENVIRONMENT_NAME],
)
@mock.patch.object(BatchClientHook, "client")
def test_poke_invalid(
self, mock_batch_client, batch_compute_environment_sensor: BatchComputeEnvironmentSensor
):
mock_batch_client.describe_compute_environments.return_value = {
"computeEnvironments": [
{
"status": "INVALID",
}
]
}
with pytest.raises(AirflowException) as ctx:
batch_compute_environment_sensor.poke({})
mock_batch_client.describe_compute_environments.assert_called_once_with(
computeEnvironments=[ENVIRONMENT_NAME],
)
assert "AWS Batch compute environment failed" in str(ctx.value)
@pytest.mark.parametrize(
("compute_env", "error_message"),
(
(
[{"status": "unknown_status"}],
"AWS Batch compute environment failed. AWS Batch compute environment status:",
),
([], "AWS Batch compute environment"),
),
)
@mock.patch.object(BatchClientHook, "client")
def test_fail_poke(
self,
mock_batch_client,
batch_compute_environment_sensor: BatchComputeEnvironmentSensor,
compute_env,
error_message,
):
mock_batch_client.describe_compute_environments.return_value = {"computeEnvironments": compute_env}
with pytest.raises(AirflowException, match=error_message):
batch_compute_environment_sensor.poke({})
@pytest.fixture(scope="module")
def batch_job_queue_sensor() -> BatchJobQueueSensor:
return BatchJobQueueSensor(
task_id="test_batch_job_queue_sensor",
job_queue=JOB_QUEUE,
)
| TestBatchComputeEnvironmentSensor |
python | spack__spack | lib/spack/spack/vendor/jsonschema/_reflect.py | {
"start": 407,
"end": 523
} | class ____(ValueError):
"""
The given name is not a dot-separated list of Python objects.
"""
| InvalidName |
python | ray-project__ray | python/ray/autoscaler/v2/tests/test_instance_manager.py | {
"start": 643,
"end": 15328
} | class ____(unittest.TestCase):
def test_instances_version_mismatch(self):
ins_storage = MagicMock()
subscriber = MockSubscriber()
im = InstanceManager(
ins_storage, instance_status_update_subscribers=[subscriber]
)
# Version mismatch on reading from the storage.
ins_storage.get_instances.return_value = ({}, 1)
update = InstanceUpdateEvent(
instance_id="id-1",
new_instance_status=Instance.QUEUED,
instance_type="type-1",
upsert=True,
)
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=0,
updates=[update],
)
)
assert reply.status.code == StatusCode.VERSION_MISMATCH
assert len(subscriber.events) == 0
# Version OK.
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=1,
updates=[update],
)
)
assert reply.status.code == StatusCode.OK
assert len(subscriber.events) == 1
assert subscriber.events[0].new_instance_status == Instance.QUEUED
# Version mismatch when writing to the storage (race happens)
ins_storage.batch_upsert_instances.return_value = StoreStatus(
False, 2 # No longer 1
)
subscriber.clear()
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=1,
updates=[update],
)
)
assert reply.status.code == StatusCode.VERSION_MISMATCH
assert len(subscriber.events) == 0
# Non-version mismatch error.
ins_storage.batch_upsert_instances.return_value = StoreStatus(
False, 1 # Still 1
)
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=1,
updates=[update],
)
)
assert reply.status.code == StatusCode.UNKNOWN_ERRORS
assert len(subscriber.events) == 0
def test_get_and_updates(self):
ins_storage = InstanceStorage(
"cluster-id",
InMemoryStorage(),
)
subscriber = MockSubscriber()
im = InstanceManager(
ins_storage, instance_status_update_subscribers=[subscriber]
)
# Empty storage.
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert reply.status.code == StatusCode.OK
assert list(reply.state.instances) == []
# Launch nodes.
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=0,
updates=[
InstanceUpdateEvent(
instance_type="type-1",
instance_id="id-1",
new_instance_status=Instance.QUEUED,
upsert=True,
),
InstanceUpdateEvent(
instance_type="type-2",
instance_id="id-2",
new_instance_status=Instance.QUEUED,
upsert=True,
),
InstanceUpdateEvent(
instance_type="type-2",
instance_id="id-3",
new_instance_status=Instance.QUEUED,
upsert=True,
),
],
)
)
assert reply.status.code == StatusCode.OK
assert len(subscriber.events) == 3
for e in subscriber.events:
assert e.new_instance_status == Instance.QUEUED
# Get launched nodes.
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert reply.status.code == StatusCode.OK
assert len(reply.state.instances) == 3
instance_ids = [ins.instance_id for ins in reply.state.instances]
types_count = defaultdict(int)
for ins in reply.state.instances:
types_count[ins.instance_type] += 1
assert ins.status == Instance.QUEUED
assert types_count["type-1"] == 1
assert types_count["type-2"] == 2
# Update node status.
subscriber.clear()
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=1,
updates=[
InstanceUpdateEvent(
instance_id=instance_ids[0],
new_instance_status=Instance.REQUESTED,
instance_type="type-1",
launch_request_id="l1",
),
InstanceUpdateEvent(
instance_id=instance_ids[1],
new_instance_status=Instance.REQUESTED,
launch_request_id="l1",
instance_type="type-1",
),
],
)
)
assert reply.status.code == StatusCode.OK
assert len(subscriber.events) == 2
for e in subscriber.events:
assert e.new_instance_status == Instance.REQUESTED
# Get updated nodes.
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert reply.status.code == StatusCode.OK
assert len(reply.state.instances) == 3
types_count = defaultdict(int)
for ins in reply.state.instances:
types_count[ins.instance_type] += 1
if ins.instance_id in [instance_ids[0], instance_ids[1]]:
assert ins.status == Instance.REQUESTED
else:
assert ins.status == Instance.QUEUED
# Invalid instances status update.
subscriber.clear()
with pytest.raises(AssertionError):
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=2,
updates=[
InstanceUpdateEvent(
instance_id=instance_ids[2],
# Not requested yet.
new_instance_status=Instance.RAY_RUNNING,
),
],
)
)
assert len(subscriber.events) == 0
# Invalid versions.
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=0, # Invalid version, outdated.
updates=[
InstanceUpdateEvent(
instance_id=instance_ids[2],
new_instance_status=Instance.REQUESTED,
instance_type="type-2",
),
],
)
)
assert reply.status.code == StatusCode.VERSION_MISMATCH
assert len(subscriber.events) == 0
def test_insert(self):
ins_storage = InstanceStorage(
"cluster-id",
InMemoryStorage(),
)
subscriber = MockSubscriber()
im = InstanceManager(
ins_storage, instance_status_update_subscribers=[subscriber]
)
im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=0,
updates=[
InstanceUpdateEvent(
instance_type="type-1",
instance_id="id-1",
new_instance_status=Instance.QUEUED,
upsert=True,
),
InstanceUpdateEvent(
instance_id="id-2",
new_instance_status=Instance.TERMINATING,
cloud_instance_id="cloud-id-2",
upsert=True,
),
InstanceUpdateEvent(
instance_id="id-3",
new_instance_status=Instance.ALLOCATED,
cloud_instance_id="cloud-id-3",
node_kind=NodeKind.WORKER,
instance_type="type-3",
upsert=True,
),
],
)
)
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert len(reply.state.instances) == 3
instance_by_ids = {ins.instance_id: ins for ins in reply.state.instances}
assert instance_by_ids["id-1"].status == Instance.QUEUED
assert instance_by_ids["id-1"].instance_type == "type-1"
assert instance_by_ids["id-2"].status == Instance.TERMINATING
assert instance_by_ids["id-3"].status == Instance.ALLOCATED
assert instance_by_ids["id-3"].cloud_instance_id == "cloud-id-3"
version = reply.state.version
# With non-upsert flags.
with pytest.raises(AssertionError):
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=version,
updates=[
InstanceUpdateEvent(
instance_type="type-1",
instance_id="id-999",
new_instance_status=Instance.QUEUED,
),
],
)
)
# With invalid statuses
all_statuses = set(Instance.InstanceStatus.values())
non_insertable_statuses = all_statuses - {
Instance.QUEUED,
Instance.TERMINATING,
Instance.ALLOCATED,
}
for status in non_insertable_statuses:
subscriber.clear()
with pytest.raises(AssertionError):
reply = im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=version,
updates=[
InstanceUpdateEvent(
instance_id="id-999",
new_instance_status=status,
),
],
)
)
assert len(subscriber.events) == 0
def test_apply_update(self):
ins_storage = InstanceStorage(
"cluster-id",
InMemoryStorage(),
)
subscriber = MockSubscriber()
im = InstanceManager(
ins_storage, instance_status_update_subscribers=[subscriber]
)
# Insert a new instance.
im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=0,
updates=[
InstanceUpdateEvent(
instance_type="type-1",
instance_id="id-1",
new_instance_status=Instance.QUEUED,
upsert=True,
),
],
)
)
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert len(reply.state.instances) == 1
assert reply.state.instances[0].status == Instance.QUEUED
assert reply.state.instances[0].instance_type == "type-1"
# Request
im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=1,
updates=[
InstanceUpdateEvent(
instance_id="id-1",
new_instance_status=Instance.REQUESTED,
launch_request_id="l1",
instance_type="type-1",
),
],
)
)
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert len(reply.state.instances) == 1
assert reply.state.instances[0].status == Instance.REQUESTED
assert reply.state.instances[0].launch_request_id == "l1"
# ALLOCATED
im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=2,
updates=[
InstanceUpdateEvent(
instance_id="id-1",
new_instance_status=Instance.ALLOCATED,
cloud_instance_id="cloud-id-1",
node_kind=NodeKind.WORKER,
instance_type="type-1",
),
],
)
)
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert len(reply.state.instances) == 1
assert reply.state.instances[0].status == Instance.ALLOCATED
assert reply.state.instances[0].cloud_instance_id == "cloud-id-1"
# RAY_RUNNING
im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=3,
updates=[
InstanceUpdateEvent(
instance_id="id-1",
new_instance_status=Instance.RAY_RUNNING,
ray_node_id="ray-node-1",
),
],
)
)
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert len(reply.state.instances) == 1
assert reply.state.instances[0].status == Instance.RAY_RUNNING
assert reply.state.instances[0].node_id == "ray-node-1"
# TERMINATED
im.update_instance_manager_state(
UpdateInstanceManagerStateRequest(
expected_version=4,
updates=[
InstanceUpdateEvent(
instance_id="id-1",
new_instance_status=Instance.TERMINATED,
),
],
)
)
reply = im.get_instance_manager_state(GetInstanceManagerStateRequest())
assert len(reply.state.instances) == 1
assert reply.state.instances[0].status == Instance.TERMINATED
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| InstanceManagerTest |
python | tiangolo__fastapi | docs_src/schema_extra_example/tutorial003_an.py | {
"start": 150,
"end": 721
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
@app.put("/items/{item_id}")
async def update_item(
item_id: int,
item: Annotated[
Item,
Body(
examples=[
{
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
],
),
],
):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | kubernetes-client__python | kubernetes/client/models/v1_limit_range_list.py | {
"start": 383,
"end": 7091
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1LimitRange]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1LimitRangeList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1LimitRangeList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1LimitRangeList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1LimitRangeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1LimitRangeList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1LimitRangeList. # noqa: E501
Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
:return: The items of this V1LimitRangeList. # noqa: E501
:rtype: list[V1LimitRange]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1LimitRangeList.
Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
:param items: The items of this V1LimitRangeList. # noqa: E501
:type: list[V1LimitRange]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1LimitRangeList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1LimitRangeList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1LimitRangeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1LimitRangeList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1LimitRangeList. # noqa: E501
:return: The metadata of this V1LimitRangeList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1LimitRangeList.
:param metadata: The metadata of this V1LimitRangeList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LimitRangeList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1LimitRangeList):
return True
return self.to_dict() != other.to_dict()
| V1LimitRangeList |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modeling_qwen3_vl.py | {
"start": 4229,
"end": 7341
} | class ____(nn.Module):
def __init__(self, config: Qwen3VLVisionConfig, use_postshuffle_norm=False) -> None:
super().__init__()
self.hidden_size = config.hidden_size * (config.spatial_merge_size**2)
self.use_postshuffle_norm = use_postshuffle_norm
self.norm = nn.LayerNorm(self.hidden_size if use_postshuffle_norm else config.hidden_size, eps=1e-6)
self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
self.act_fn = nn.GELU()
self.linear_fc2 = nn.Linear(self.hidden_size, config.out_hidden_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.norm(x.view(-1, self.hidden_size) if self.use_postshuffle_norm else x).view(-1, self.hidden_size)
x = self.linear_fc2(self.act_fn(self.linear_fc1(x)))
return x
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb_vision(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
orig_q_dtype = q.dtype
orig_k_dtype = k.dtype
q, k = q.float(), k.float()
cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = q_embed.to(orig_q_dtype)
k_embed = k_embed.to(orig_k_dtype)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Qwen3VLVisionPatchMerger |
python | numba__numba | numba/core/types/misc.py | {
"start": 2881,
"end": 3255
} | class ____(Type):
"""
Special type representing a variable number of arguments at the
end of a function's signature. Only used for signature matching,
not for actual values.
"""
def __init__(self, dtype):
self.dtype = dtype
super(VarArg, self).__init__("*%s" % dtype)
@property
def key(self):
return self.dtype
| VarArg |
python | viewflow__viewflow | viewflow/utils.py | {
"start": 5308,
"end": 7504
} | class ____:
"""
Class representing an HTML icon element.
Attributes:
-----------
icon_name : str
The name of the icon to use.
class_ : str, optional
The CSS class to apply to the icon element.
"""
def __init__(self, icon_name: str, class_: Optional[str] = None):
self.icon_name = icon_name
self.class_ = class_ or ""
def __str__(self) -> str:
icon_name = conditional_escape(self.icon_name)
class_name = conditional_escape(self.class_)
return mark_safe(
f'<i class="material-icons ${class_name}" aria-hidden="true">{icon_name}</i>'
)
def get_object_data(obj: models.Model) -> Iterator[Tuple[models.Field, str, Any]]:
"""
List of object fields to display. Choice fields values are expanded to
readable choice label.
Returns a list of (field, label, value) tuples for the fields of the given
object.
"""
for field in obj._meta.fields:
if isinstance(field, models.AutoField):
continue
elif field.auto_created:
continue
else:
choice_display_attr = "get_{}_display".format(field.name)
if hasattr(obj, choice_display_attr):
value = getattr(obj, choice_display_attr)()
else:
value = getattr(obj, field.name)
if value is not None:
yield (field, field.verbose_name.capitalize(), value)
if (
hasattr(obj, "artifact_object_id")
and hasattr(obj, "artifact")
and obj.artifact_object_id
and obj.artifact is not None
):
yield from get_object_data(obj.artifact)
PATH_PARAMETER_COMPONENT_RE = re.compile(
r"<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>"
)
def list_path_components(route: str) -> List[str]:
"""
Extract keyword arguments from a Django path expression, which are used as
input parameters for a view function.
Example Usage:
>>> list_path_components('/prefix/<str:pk>')
['pk']
>>> list_path_components('<str:pk>/<int:id>')
['pk', 'id']
"""
return [match["parameter"] for match in PATH_PARAMETER_COMPONENT_RE.finditer(route)]
| Icon |
python | python__mypy | mypyc/ir/ops.py | {
"start": 8531,
"end": 9897
} | class ____(Value):
"""Abstract base class for all IR operations.
Each operation must be stored in a BasicBlock (in 'ops') to be
active in the IR. This is different from non-Op values, including
Register and Integer, where a reference from an active Op is
sufficient to be considered active.
In well-formed IR an active Op has no references to inactive ops
or ops used in another function.
"""
def __init__(self, line: int) -> None:
self.line = line
def can_raise(self) -> bool:
# Override this is if Op may raise an exception. Note that currently the fact that
# only RegisterOps may raise an exception in hard coded in some places.
return False
@abstractmethod
def sources(self) -> list[Value]:
"""All the values the op may read."""
@abstractmethod
def set_sources(self, new: list[Value]) -> None:
"""Rewrite the sources of an op"""
def stolen(self) -> list[Value]:
"""Return arguments that have a reference count stolen by this op"""
return []
def unique_sources(self) -> list[Value]:
result: list[Value] = []
for reg in self.sources():
if reg not in result:
result.append(reg)
return result
@abstractmethod
def accept(self, visitor: OpVisitor[T]) -> T:
pass
| Op |
python | huggingface__transformers | src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | {
"start": 24134,
"end": 26600
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.head_dim = config.head_dim
self.n_heads = config.num_attention_heads
self.dropout = config.attention_dropout
self.inner_dim = self.n_heads * self.head_dim
self.is_causal = False
self.scaling = self.head_dim**-0.5
self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
"""
Self-attention block
"""
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1)
attn_output = self.output(attn_output)
return attn_output, attn_weights
| Kosmos2_5VisionAttention |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 59527,
"end": 59904
} | class ____(BaseModel):
"""Replica rank model."""
rank: int = Field(
description="Global rank of the replica across all nodes scoped to the deployment."
)
node_rank: int = Field(description="Rank of the node in the deployment.")
local_rank: int = Field(
description="Rank of the replica on the node scoped to the deployment."
)
| ReplicaRank |
python | doocs__leetcode | solution/0000-0099/0091.Decode Ways/Solution2.py | {
"start": 0,
"end": 287
} | class ____:
def numDecodings(self, s: str) -> int:
f, g = 0, 1
for i, c in enumerate(s, 1):
h = g if c != "0" else 0
if i > 1 and s[i - 2] != "0" and int(s[i - 2 : i]) <= 26:
h += f
f, g = g, h
return g
| Solution |
python | kamyu104__LeetCode-Solutions | Python/lowest-common-ancestor-of-a-binary-tree-iii.py | {
"start": 408,
"end": 969
} | class ____(object):
def lowestCommonAncestor(self, p, q):
"""
:type node: Node
:rtype: Node
"""
def depth(node):
d = 0
while node:
node = node.parent
d += 1
return d
p_d, q_d = depth(p), depth(q)
while p_d > q_d:
p = p.parent
p_d -= 1
while p_d < q_d:
q = q.parent
q_d -= 1
while p != q:
p = p.parent
q = q.parent
return p
| Solution2 |
python | modin-project__modin | modin/core/execution/dispatching/factories/dispatcher.py | {
"start": 1370,
"end": 2267
} | class ____(object):
"""
IO-Engine that does nothing more than raise NotImplementedError when any method is called.
Parameters
----------
factory_name : str
Factory name, which will be reflected in error messages.
Notes
-----
Used for testing purposes.
"""
def __init__(self, factory_name=""):
self.factory_name = factory_name or "Unknown"
def __getattr__(self, name):
"""
Return a function that raises `NotImplementedError` for the `name` method.
Parameters
----------
name : str
Method name to indicate in `NotImplementedError`.
Returns
-------
callable
"""
def stub(*args, **kw):
raise NotImplementedError(
f"Method {self.factory_name}.{name} is not implemented"
)
return stub
| StubIoEngine |
python | django__django | tests/decorators/test_csrf.py | {
"start": 5743,
"end": 6813
} | class ____(SimpleTestCase):
def test_wrapped_sync_function_is_not_coroutine_function(self):
def sync_view(request):
return HttpResponse()
wrapped_view = csrf_exempt(sync_view)
self.assertIs(iscoroutinefunction(wrapped_view), False)
def test_wrapped_async_function_is_coroutine_function(self):
async def async_view(request):
return HttpResponse()
wrapped_view = csrf_exempt(async_view)
self.assertIs(iscoroutinefunction(wrapped_view), True)
def test_csrf_exempt_decorator(self):
@csrf_exempt
def sync_view(request):
return HttpResponse()
self.assertIs(sync_view.csrf_exempt, True)
self.assertIsInstance(sync_view(HttpRequest()), HttpResponse)
async def test_csrf_exempt_decorator_async_view(self):
@csrf_exempt
async def async_view(request):
return HttpResponse()
self.assertIs(async_view.csrf_exempt, True)
self.assertIsInstance(await async_view(HttpRequest()), HttpResponse)
| CsrfExemptTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.