language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
django__django
|
django/forms/models.py
|
{
"start": 12106,
"end": 21125
}
|
class ____(BaseForm, AltersData):
def __init__(
self,
data=None,
files=None,
auto_id="id_%s",
prefix=None,
initial=None,
error_class=ErrorList,
label_suffix=None,
empty_permitted=False,
instance=None,
use_required_attribute=None,
renderer=None,
):
opts = self._meta
if opts.model is None:
raise ValueError("ModelForm has no model class specified.")
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_(unique|constraints) will be set to True by
# BaseModelForm.clean(). It is False by default so overriding
# self.clean() and failing to call super will stop
# validate_(unique|constraints) from being called.
self._validate_unique = False
self._validate_constraints = False
super().__init__(
data,
files,
auto_id,
prefix,
object_data,
error_class,
label_suffix,
empty_permitted,
use_required_attribute=use_required_attribute,
renderer=renderer,
)
for formfield in self.fields.values():
apply_limit_choices_to_to_formfield(formfield)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, exclude several types of fields from model
validation. See tickets #12507, #12521, #12553.
"""
exclude = set()
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.add(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.add(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.add(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors:
exclude.add(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the
# blank value may be included in a unique check, so cannot be
# excluded from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if (
not f.blank
and not form_field.required
and field_value in form_field.empty_values
):
exclude.add(f.name)
return exclude
def clean(self):
self._validate_unique = True
self._validate_constraints = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, "error_dict"):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (
field == NON_FIELD_ERRORS
and opts.error_messages
and NON_FIELD_ERRORS in opts.error_messages
):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (
isinstance(message, ValidationError)
and message.code in error_messages
):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.add(name)
try:
self.instance = construct_instance(
self, self.instance, opts.fields, opts.exclude
)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(
exclude=exclude, validate_unique=False, validate_constraints=False
)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness and constraints if needed.
if self._validate_unique:
self.validate_unique()
if self._validate_constraints:
self.validate_constraints()
def validate_unique(self):
"""
Call the instance's validate_unique() method and update the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def validate_constraints(self):
"""
Call the instance's validate_constraints() method and update the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_constraints(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# private_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, "save_form_data"):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate."
% (
self.instance._meta.object_name,
"created" if self.instance._state.adding else "changed",
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
|
BaseModelForm
|
python
|
django-import-export__django-import-export
|
import_export/exceptions.py
|
{
"start": 104,
"end": 204
}
|
class ____(ImportExportError):
"""Raised when a field encounters an error."""
pass
|
FieldError
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/batch/grpc_batch.py
|
{
"start": 1122,
"end": 15829
}
|
class ____(_BaseGRPC):
"""This class is used to insert multiple objects into Weaviate using the gRPC API.
It is used within the `_Data` and `_Batch` classes hence the necessary generalities
and abstractions so as not to couple to strongly to either use-case.
"""
def __init__(
self,
weaviate_version: _ServerVersion,
consistency_level: Optional[ConsistencyLevel],
grpc_max_msg_size: Optional[int],
):
super().__init__(weaviate_version, consistency_level, False)
self.grpc_max_msg_size = grpc_max_msg_size or MAX_GRPC_MESSAGE_LENGTH
def __single_vec(self, vectors: Optional[VECTORS]) -> Optional[bytes]:
if not _is_1d_vector(vectors):
return None
return _Pack.single(vectors)
def __multi_vec(self, vectors: Optional[VECTORS]) -> Optional[List[base_pb2.Vectors]]:
if vectors is None or _is_1d_vector(vectors):
return None
# pylance fails to type narrow TypeGuard in _is_1d_vector properly
vectors = cast(Mapping[str, Union[Sequence[float], Sequence[Sequence[float]]]], vectors)
return [
base_pb2.Vectors(name=name, vector_bytes=packing.bytes_, type=packing.type_)
for name, vec_or_vecs in vectors.items()
if (packing := _Pack.parse_single_or_multi_vec(vec_or_vecs))
]
def grpc_object(self, obj: _BatchObject) -> batch_pb2.BatchObject:
return batch_pb2.BatchObject(
collection=obj.collection,
uuid=(str(obj.uuid) if obj.uuid is not None else str(uuid_package.uuid4())),
properties=(
self.__translate_properties_from_python_to_grpc(
obj.properties,
obj.references if obj.references is not None else {},
)
if obj.properties is not None
else None
),
tenant=obj.tenant,
vector_bytes=self.__single_vec(obj.vector),
vectors=self.__multi_vec(obj.vector),
)
def grpc_objects(self, objects: List[_BatchObject]) -> List[batch_pb2.BatchObject]:
return [self.grpc_object(obj) for obj in objects]
def grpc_reference(self, reference: _BatchReference) -> batch_pb2.BatchReference:
ref = BatchReference._from_internal(reference)
return batch_pb2.BatchReference(
name=ref.from_property_name,
from_collection=ref.from_object_collection,
from_uuid=str(ref.from_object_uuid),
to_collection=ref.to_object_collection,
to_uuid=str(ref.to_object_uuid),
tenant=ref.tenant,
)
def grpc_references(self, references: List[_BatchReference]) -> List[batch_pb2.BatchReference]:
return [self.grpc_reference(ref) for ref in references]
def objects(
self,
connection: Connection,
*,
objects: List[_BatchObject],
timeout: Union[int, float],
max_retries: float,
) -> executor.Result[BatchObjectReturn]:
"""Insert multiple objects into Weaviate through the gRPC API.
Args:
connection: The connection to the Weaviate instance.
objects: A list of `WeaviateObject` containing the data of the objects to be inserted. The class name must be
provided for each object, and the UUID is optional. If no UUID is provided, one will be generated for each object.
The UUIDs of the inserted objects will be returned in the `uuids` attribute of the returned `_BatchReturn` object.
The UUIDs of the objects that failed to be inserted will be returned in the `errors` attribute of the returned `_BatchReturn` object.
timeout: The timeout in seconds for the request.
max_retries: The maximum number of retries in case of a failure.
"""
weaviate_objs = self.grpc_objects(objects)
start = time.time()
def resp(errors: Dict[int, str]) -> BatchObjectReturn:
if len(errors) == len(weaviate_objs):
# Escape sequence (backslash) not allowed in expression portion of f-string prior to Python 3.12: pylance
raise WeaviateInsertManyAllFailedError(
"Here is the set of all errors: {}".format(
"\n".join(err for err in set(errors.values()))
)
)
elapsed_time = time.time() - start
all_responses: List[Union[uuid_package.UUID, ErrorObject]] = cast(
List[Union[uuid_package.UUID, ErrorObject]],
list(range(len(weaviate_objs))),
)
return_success: Dict[int, uuid_package.UUID] = {}
return_errors: Dict[int, ErrorObject] = {}
for idx, weav_obj in enumerate(weaviate_objs):
obj = objects[idx]
if idx in errors:
error = ErrorObject(
errors[idx],
BatchObject._from_internal(obj),
original_uuid=obj.uuid,
)
return_errors[obj.index] = error
all_responses[idx] = error
else:
success = uuid_package.UUID(weav_obj.uuid)
return_success[obj.index] = success
all_responses[idx] = success
return BatchObjectReturn(
uuids=return_success,
errors=return_errors,
has_errors=len(errors) > 0,
_all_responses=all_responses,
elapsed_seconds=elapsed_time,
)
request = batch_pb2.BatchObjectsRequest(
objects=weaviate_objs,
consistency_level=self._consistency_level,
)
return executor.execute(
response_callback=resp,
method=connection.grpc_batch_objects,
request=request,
timeout=timeout,
max_retries=max_retries,
)
# def send(
# self,
# connection: ConnectionSync,
# *,
# objects: List[batch_pb2.BatchObject],
# references: List[batch_pb2.BatchReference],
# stream_id: str,
# timeout: Union[int, float],
# ) -> batch_pb2.BatchSendReply:
# """Send multiple objects to Weaviate through the gRPC API.
# Args:
# connection: The connection to the Weaviate instance.
# objects: A list of `_BatchObject` containing the data of the objects to be inserted.
# references: A list of `_BatchReference` containing the references to be inserted.
# stream_id: The ID of the stream to send the objects in relation to.
# timeout: The timeout in seconds for the request.
# max_retries: The maximum number of retries in case of a failure.
# """
# res = batch_pb2.BatchSendReply()
# for request in self.__generate_send_requests(objects, references, stream_id):
# res = connection.grpc_batch_send(
# request=request,
# timeout=timeout,
# )
# time.sleep(res.backoff_seconds)
# return res
def stream(
self,
connection: ConnectionSync,
*,
requests: Generator[batch_pb2.BatchStreamRequest, None, None],
) -> Generator[batch_pb2.BatchStreamReply, None, None]:
"""Start a new stream for receiving messages about the ongoing server-side batching from Weaviate.
Args:
connection: The connection to the Weaviate instance.
requests: A generator that yields `BatchStreamRequest` messages to be sent to the server.
"""
return connection.grpc_batch_stream(requests=requests)
def __translate_properties_from_python_to_grpc(
self, data: Dict[str, Any], refs: ReferenceInputs
) -> batch_pb2.BatchObject.Properties:
_validate_props(data)
multi_target: List[batch_pb2.BatchObject.MultiTargetRefProps] = []
single_target: List[batch_pb2.BatchObject.SingleTargetRefProps] = []
non_ref_properties: Struct = Struct()
bool_arrays: List[base_pb2.BooleanArrayProperties] = []
text_arrays: List[base_pb2.TextArrayProperties] = []
int_arrays: List[base_pb2.IntArrayProperties] = []
float_arrays: List[base_pb2.NumberArrayProperties] = []
object_properties: List[base_pb2.ObjectProperties] = []
object_array_properties: List[base_pb2.ObjectArrayProperties] = []
empty_lists: List[str] = []
for key, ref in refs.items():
if isinstance(ref, ReferenceToMulti):
multi_target.append(
batch_pb2.BatchObject.MultiTargetRefProps(
uuids=ref.uuids_str,
target_collection=ref.target_collection,
prop_name=key,
)
)
elif isinstance(ref, str) or isinstance(ref, uuid_package.UUID):
single_target.append(
batch_pb2.BatchObject.SingleTargetRefProps(uuids=[str(ref)], prop_name=key)
)
elif isinstance(ref, list):
single_target.append(
batch_pb2.BatchObject.SingleTargetRefProps(
uuids=[str(v) for v in ref], prop_name=key
)
)
else:
raise WeaviateInvalidInputError(f"Invalid reference: {ref}")
for key, entry in data.items():
if isinstance(entry, dict):
parsed = self.__translate_properties_from_python_to_grpc(entry, {})
object_properties.append(
base_pb2.ObjectProperties(
prop_name=key,
value=base_pb2.ObjectPropertiesValue(
non_ref_properties=parsed.non_ref_properties,
int_array_properties=parsed.int_array_properties,
text_array_properties=parsed.text_array_properties,
number_array_properties=parsed.number_array_properties,
boolean_array_properties=parsed.boolean_array_properties,
object_properties=parsed.object_properties,
object_array_properties=parsed.object_array_properties,
empty_list_props=parsed.empty_list_props,
),
)
)
elif isinstance(entry, list) and len(entry) == 0:
empty_lists.append(key)
elif isinstance(entry, list) and isinstance(entry[0], dict):
entry = cast(List[Dict[str, Any]], entry)
object_array_properties.append(
base_pb2.ObjectArrayProperties(
values=[
base_pb2.ObjectPropertiesValue(
non_ref_properties=parsed.non_ref_properties,
int_array_properties=parsed.int_array_properties,
text_array_properties=parsed.text_array_properties,
number_array_properties=parsed.number_array_properties,
boolean_array_properties=parsed.boolean_array_properties,
object_properties=parsed.object_properties,
object_array_properties=parsed.object_array_properties,
empty_list_props=parsed.empty_list_props,
)
for v in entry
if (parsed := self.__translate_properties_from_python_to_grpc(v, {}))
],
prop_name=key,
)
)
elif isinstance(entry, list) and isinstance(entry[0], bool):
bool_arrays.append(base_pb2.BooleanArrayProperties(prop_name=key, values=entry))
elif isinstance(entry, list) and isinstance(entry[0], str):
text_arrays.append(base_pb2.TextArrayProperties(prop_name=key, values=entry))
elif isinstance(entry, list) and isinstance(entry[0], datetime.datetime):
text_arrays.append(
base_pb2.TextArrayProperties(
prop_name=key, values=[_datetime_to_string(x) for x in entry]
)
)
elif isinstance(entry, list) and isinstance(entry[0], uuid_package.UUID):
text_arrays.append(
base_pb2.TextArrayProperties(prop_name=key, values=[str(x) for x in entry])
)
elif isinstance(entry, list) and isinstance(entry[0], int):
int_arrays.append(base_pb2.IntArrayProperties(prop_name=key, values=entry))
elif isinstance(entry, list) and isinstance(entry[0], float):
values_bytes = struct.pack("{}d".format(len(entry)), *entry)
float_arrays.append(
base_pb2.NumberArrayProperties(prop_name=key, values_bytes=values_bytes)
)
elif isinstance(entry, GeoCoordinate):
non_ref_properties.update({key: entry._to_dict()})
elif isinstance(entry, PhoneNumber):
non_ref_properties.update({key: entry._to_dict()})
else:
non_ref_properties.update({key: _serialize_primitive(entry)})
return batch_pb2.BatchObject.Properties(
non_ref_properties=non_ref_properties,
multi_target_ref_props=multi_target,
single_target_ref_props=single_target,
text_array_properties=text_arrays,
number_array_properties=float_arrays,
int_array_properties=int_arrays,
boolean_array_properties=bool_arrays,
object_properties=object_properties,
object_array_properties=object_array_properties,
empty_list_props=empty_lists,
)
def _validate_props(props: Dict[str, Any]) -> None:
if "id" in props or "vector" in props:
raise WeaviateInsertInvalidPropertyError(props)
def _serialize_primitive(value: Any) -> Any:
if isinstance(value, uuid_package.UUID):
return str(value)
if isinstance(value, datetime.datetime):
return _datetime_to_string(value)
if isinstance(value, list):
return [_serialize_primitive(val) for val in value]
return value
|
_BatchGRPC
|
python
|
pyinstaller__pyinstaller
|
tests/functional/scripts/pyi_osx_aevent_logger_carbon.py
|
{
"start": 831,
"end": 965
}
|
class ____(ctypes.Structure):
_fields_ = [
("descKey", ctypes.c_int),
("descContent", ctypes.c_void_p),
]
|
AEDesc
|
python
|
getsentry__sentry
|
tests/sentry/replays/endpoints/test_project_replay_summary.py
|
{
"start": 752,
"end": 10970
}
|
class ____(
TransactionTestCase,
SnubaTestCase,
):
endpoint = "sentry-api-0-project-replay-summary"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.replay_id = uuid.uuid4().hex
self.url = reverse(
self.endpoint,
args=(self.organization.slug, self.project.slug, self.replay_id),
)
self.features = {
"organizations:session-replay": True,
"organizations:replay-ai-summaries": True,
}
self.mock_has_seer_access_patcher = patch(
"sentry.replays.endpoints.project_replay_summary.has_seer_access",
return_value=True,
)
self.mock_has_seer_access = self.mock_has_seer_access_patcher.start()
def tearDown(self) -> None:
self.mock_has_seer_access_patcher.stop()
super().tearDown()
def store_replay(self, dt: datetime | None = None, **kwargs: Any) -> None:
replay = mock_replay(
dt or datetime.now(UTC) - timedelta(minutes=1), # Avoid clock skew query issues.
self.project.id,
self.replay_id,
**kwargs,
)
response = requests.post(
settings.SENTRY_SNUBA + "/tests/entities/replays/insert", json=[replay]
)
assert response.status_code == 200
def test_feature_flag_disabled(self) -> None:
features = [
(False, True),
(True, False),
(False, False),
]
for replay, replay_ai in features:
with self.feature(
{
"organizations:session-replay": replay,
"organizations:replay-ai-summaries": replay_ai,
}
):
for method in ["GET", "POST"]:
response = (
self.client.get(self.url) if method == "GET" else self.client.post(self.url)
)
assert response.status_code == 403, (replay, replay_ai, method)
def test_no_seer_access(self) -> None:
self.mock_has_seer_access.return_value = False
with self.feature(self.features):
for method in ["GET", "POST"]:
response = (
self.client.get(self.url) if method == "GET" else self.client.post(self.url)
)
assert response.status_code == 403, method
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_get_simple(self, mock_make_seer_api_request: Mock) -> None:
mock_response = MockSeerResponse(200, json_data={"hello": "world"})
mock_make_seer_api_request.return_value = mock_response
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
assert response.json() == {"hello": "world"}
mock_make_seer_api_request.assert_called_once()
call_args = mock_make_seer_api_request.call_args
assert call_args[1]["path"] == SEER_POLL_STATE_ENDPOINT_PATH
assert json.loads(call_args[1]["body"].decode()) == {"replay_id": self.replay_id}
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_post_simple(self, mock_make_seer_api_request: Mock) -> None:
mock_make_seer_api_request.return_value = MockSeerResponse(
200, json_data={"hello": "world"}
)
start = datetime.now(UTC) - timedelta(days=3)
end = datetime.now(UTC) - timedelta(days=2, hours=23)
self.store_replay(dt=start, segment_id=0)
self.store_replay(dt=end, segment_id=1)
with self.feature(self.features):
response = self.client.post(
self.url, data={"num_segments": 2}, content_type="application/json"
)
assert response.status_code == 200
assert response.json() == {"hello": "world"}
mock_make_seer_api_request.assert_called_once()
call_args = mock_make_seer_api_request.call_args
assert call_args[1]["path"] == SEER_START_TASK_ENDPOINT_PATH
request_body = json.loads(call_args[1]["body"].decode())
assert request_body["replay_id"] == self.replay_id
assert abs(datetime.fromisoformat(request_body["replay_start"]) - start) <= timedelta(
seconds=1
)
assert abs(datetime.fromisoformat(request_body["replay_end"]) - end) <= timedelta(seconds=1)
assert request_body["num_segments"] == 2
assert request_body["organization_id"] == self.organization.id
assert request_body["project_id"] == self.project.id
assert request_body["temperature"] is None
def test_post_replay_not_found(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url, data={"num_segments": 2}, content_type="application/json"
)
assert response.status_code == 404
@patch("sentry.replays.endpoints.project_replay_summary.MAX_SEGMENTS_TO_SUMMARIZE", 1)
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_post_max_segments_exceeded(self, mock_make_seer_api_request: Mock) -> None:
mock_make_seer_api_request.return_value = MockSeerResponse(
200, json_data={"hello": "world"}
)
self.store_replay()
with self.feature(self.features):
response = self.client.post(
self.url, data={"num_segments": 2}, content_type="application/json"
)
assert response.status_code == 200
mock_make_seer_api_request.assert_called_once()
call_args = mock_make_seer_api_request.call_args
assert call_args[1]["path"] == SEER_START_TASK_ENDPOINT_PATH
request_body = json.loads(call_args[1]["body"].decode())
assert request_body["num_segments"] == 1
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_post_with_temperature(self, mock_make_seer_api_request: Mock) -> None:
mock_make_seer_api_request.return_value = MockSeerResponse(
200, json_data={"hello": "world"}
)
self.store_replay()
with self.feature(self.features):
response = self.client.post(
self.url,
data={"num_segments": 1, "temperature": 0.73},
content_type="application/json",
)
assert response.status_code == 200
mock_make_seer_api_request.assert_called_once()
call_args = mock_make_seer_api_request.call_args
assert call_args[1]["path"] == SEER_START_TASK_ENDPOINT_PATH
request_body = json.loads(call_args[1]["body"].decode())
assert request_body["temperature"] == 0.73
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_seer_timeout(self, mock_make_seer_api_request: Mock) -> None:
for method in ["GET", "POST"]:
mock_make_seer_api_request.side_effect = requests.exceptions.Timeout(
"Request timed out"
)
self.store_replay()
with self.feature(self.features):
response = (
self.client.get(self.url)
if method == "GET"
else self.client.post(
self.url, data={"num_segments": 1}, content_type="application/json"
)
)
assert response.status_code == 500, method
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_seer_connection_error(self, mock_make_seer_api_request: Mock) -> None:
for method in ["GET", "POST"]:
mock_make_seer_api_request.side_effect = requests.exceptions.ConnectionError(
"Connection error"
)
self.store_replay()
with self.feature(self.features):
response = (
self.client.get(self.url)
if method == "GET"
else self.client.post(
self.url, data={"num_segments": 1}, content_type="application/json"
)
)
assert response.status_code == 500, method
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_seer_request_error(self, mock_make_seer_api_request: Mock) -> None:
for method in ["GET", "POST"]:
mock_make_seer_api_request.side_effect = requests.exceptions.RequestException(
"Generic request error"
)
self.store_replay()
with self.feature(self.features):
response = (
self.client.get(self.url)
if method == "GET"
else self.client.post(
self.url, data={"num_segments": 1}, content_type="application/json"
)
)
assert response.status_code == 500, method
@patch("sentry.replays.endpoints.project_replay_summary.make_signed_seer_api_request")
def test_seer_http_errors(self, mock_make_seer_api_request: Mock) -> None:
for method in ["GET", "POST"]:
for status in [400, 401, 403, 404, 429, 500, 502, 503, 504]:
mock_response = MockSeerResponse(
status=status,
json_data={"error": "Test error"},
)
mock_make_seer_api_request.return_value = mock_response
self.store_replay()
with self.feature(self.features):
response = (
self.client.get(self.url)
if method == "GET"
else self.client.post(
self.url, data={"num_segments": 1}, content_type="application/json"
)
)
assert response.status_code == 500, method
|
ProjectReplaySummaryTestCase
|
python
|
google__pytype
|
pytype/pyc/compiler_test.py
|
{
"start": 220,
"end": 1341
}
|
class ____(unittest.TestCase):
"""Test python exe utilities."""
def test_parse_interpreter_version(self):
test_cases = (
("Python 3.8.3", (3, 8)),
("Python 3.8.4 :: Something custom (64-bit)", (3, 8)),
("[OS-Y 64-bit] Python 3.9.1", (3, 9)),
)
for version_str, expected in test_cases:
self.assertEqual(
expected, compiler._parse_exe_version_string(version_str)
)
def test_get_python_exe_version(self):
version = compiler._get_python_exe_version(["python"])
self.assertIsInstance(version, tuple)
self.assertEqual(len(version), 2)
def test_custom_python_exe(self):
temp = compiler._CUSTOM_PYTHON_EXES
# Since the logic for getting a custom exe checks for the file's existence
# in the pytype/ src directory, we pick an existing file to pretend to be a
# Python exe.
compiler._CUSTOM_PYTHON_EXES = {(3, 10): "utils.py"}
((exe,),) = compiler._get_python_exes((3, 10))
self.assertEqual(os.path.basename(exe), "utils.py")
compiler._CUSTOM_PYTHON_EXES = temp
if __name__ == "__main__":
unittest.main()
|
PythonExeTest
|
python
|
redis__redis-py
|
redis/asyncio/multidb/client.py
|
{
"start": 12420,
"end": 14461
}
|
class ____(AsyncRedisModuleCommands, AsyncCoreCommands):
"""
Pipeline implementation for multiple logical Redis databases.
"""
def __init__(self, client: MultiDBClient):
self._command_stack = []
self._client = client
async def __aenter__(self: "Pipeline") -> "Pipeline":
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.reset()
await self._client.__aexit__(exc_type, exc_value, traceback)
def __await__(self):
return self._async_self().__await__()
async def _async_self(self):
return self
def __len__(self) -> int:
return len(self._command_stack)
def __bool__(self) -> bool:
"""Pipeline instances should always evaluate to True"""
return True
async def reset(self) -> None:
self._command_stack = []
async def aclose(self) -> None:
"""Close the pipeline"""
await self.reset()
def pipeline_execute_command(self, *args, **options) -> "Pipeline":
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
self._command_stack.append((args, options))
return self
def execute_command(self, *args, **kwargs):
"""Adds a command to the stack"""
return self.pipeline_execute_command(*args, **kwargs)
async def execute(self) -> List[Any]:
"""Execute all the commands in the current pipeline"""
if not self._client.initialized:
await self._client.initialize()
try:
return await self._client.command_executor.execute_pipeline(
tuple(self._command_stack)
)
finally:
await self.reset()
|
Pipeline
|
python
|
numpy__numpy
|
numpy/_core/tests/test_deprecations.py
|
{
"start": 10119,
"end": 10304
}
|
class ____(_DeprecationTestCase):
# Deprecated in NumPy 1.25, 2023-01-16
def test_deprecated_none(self):
self.assert_deprecated(np.finfo, args=(None,))
|
TestDeprecatedFinfo
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_size.py
|
{
"start": 1197,
"end": 1680
}
|
class ____(scale_continuous[Literal["legend"] | None]):
"""
Continuous area size scale
"""
_aesthetics = ["size"]
range: InitVar[tuple[float, float]] = (1, 6)
"""
Range ([Minimum, Maximum]) of the size.
"""
_: KW_ONLY
guide: Literal["legend"] | None = "legend"
def __post_init__(self, range):
from mizani.palettes import area_pal
super().__post_init__()
self.palette = area_pal(range)
@alias
|
scale_size_continuous
|
python
|
numba__numba
|
numba/tests/test_mixed_tuple_unroller.py
|
{
"start": 1191,
"end": 2313
}
|
class ____(MemoryLeakMixin, TestCase):
def check(self, func, var):
cres = func.overloads[func.signatures[0]]
ty = cres.fndesc.typemap[var]
self.assertTrue(isinstance(ty, types.Tuple))
for subty in ty:
self.assertTrue(isinstance(subty, types.Literal), "non literal")
def test_homogeneous_literal(self):
@njit
def foo():
x = (1, 2, 3)
return x[1]
self.assertEqual(foo(), foo.py_func())
self.check(foo, 'x')
def test_heterogeneous_literal(self):
@njit
def foo():
x = (1, 2, 3, 'a')
return x[3]
self.assertEqual(foo(), foo.py_func())
self.check(foo, 'x')
def test_non_literal(self):
@njit
def foo():
x = (1, 2, 3, 'a', 1j)
return x[4]
self.assertEqual(foo(), foo.py_func())
with self.assertRaises(AssertionError) as e:
self.check(foo, 'x')
self.assertIn("non literal", str(e.exception))
@register_pass(mutates_CFG=False, analysis_only=False)
|
TestLiteralTupleInterpretation
|
python
|
vyperlang__vyper
|
vyper/venom/analysis/mem_ssa.py
|
{
"start": 2281,
"end": 2590
}
|
class ____(MemoryAccess):
"""Represents a use of memory state"""
def __init__(self, id: int, load_inst: IRInstruction, loc: MemoryLocation):
super().__init__(id)
self.load_inst = load_inst
self.loc = loc
@property
def inst(self):
return self.load_inst
|
MemoryUse
|
python
|
falconry__falcon
|
e2e-tests/server/hub.py
|
{
"start": 1225,
"end": 2305
}
|
class ____:
def __init__(self) -> None:
self._emitters: set[Emitter] = set()
self._users: dict[str, WebSocket] = {}
def _update_emitters(self) -> set[Emitter]:
done = {emitter for emitter in self._emitters if emitter.done}
self._emitters.difference_update(done)
return self._emitters.copy()
def add_user(self, name: str, ws: WebSocket) -> None:
self._users[name] = ws
def remove_user(self, name: str) -> None:
self._users.pop(name, None)
async def broadcast(self, message: str) -> None:
for emitter in self._update_emitters():
await emitter.enqueue(message)
async def message(self, name: str, text: str) -> None:
ws = self._users.get(name)
if ws:
# TODO(vytas): What if this overlaps with another ongoing send?
await ws.send_text(text)
def events(self) -> typing.AsyncGenerator[SSEvent | None, None]:
emitter = Emitter()
self._update_emitters()
self._emitters.add(emitter)
return emitter.events()
|
Hub
|
python
|
joke2k__faker
|
faker/providers/phone_number/fr_DZ/__init__.py
|
{
"start": 49,
"end": 180
}
|
class ____(PhoneNumberProvider):
formats = (
"055# ### ###",
"066# ### ###",
"077# ### ###",
)
|
Provider
|
python
|
getsentry__sentry
|
fixtures/safe_migrations_apps/good_flow_delete_field_simple_app/migrations/0003_delete.py
|
{
"start": 190,
"end": 494
}
|
class ____(CheckedMigration):
dependencies = [
("good_flow_delete_field_simple_app", "0002_set_pending"),
]
operations = [
SafeRemoveField(
model_name="testtable",
name="field",
deletion_action=DeletionAction.DELETE,
),
]
|
Migration
|
python
|
PyCQA__pyflakes
|
pyflakes/messages.py
|
{
"start": 7592,
"end": 7686
}
|
class ____(Message):
message = 't-string is missing placeholders'
|
TStringMissingPlaceholders
|
python
|
joke2k__faker
|
faker/providers/color/vi_VN/__init__.py
|
{
"start": 98,
"end": 2692
}
|
class ____(ColorProvider):
"""
Implement color provider for ``vi_VN`` locale.
#Sources: https://vi.wikipedia.org/wiki/Danh_s%C3%A1ch_m%C3%A0u
"""
all_colors = OrderedDict(
(
("Trắng Antique", "#FAEBD7"),
("Aquamarine", "#7FFFD4"),
("Azure", "#F0FFFF"),
("Beige", "#F5F5DC"),
("Đen", "#000000"),
("Xanh dương", "#0000FF"),
("Xanh tím", "#8A2BE2"),
("Nâu", "#A52A2A"),
("Sô cô la", "#D2691E"),
("San hô", "#FF7F50"),
("Xanh hải quân", "#6495ED"),
("Hồng đào", "#DC143C"),
("Xanh đậm", "#00008B"),
("Xanh biển đậm", "#008B8B"),
("Xám đậm", "#A9A9A9"),
("Xanh lá đậm", "#006400"),
("Rêu đậm", "#BDB76B"),
("Cam đậm", "#FF8C00"),
("Đỏ đậm", "#8B0000"),
("Xanh ngọc đậm", "#00CED1"),
("Tím đậm", "#9400D3"),
("Hồng đậm", "#FF1493"),
("Xám xỉn", "#696969"),
("Hồng fuchsia", "#FF00FF"),
("Vàng", "#FFD700"),
("Xám", "#808080"),
("Xanh lá cây", "#008000"),
("Xanh lá cây nhạt", "#ADFF2F"),
("Hồng sáng", "#FF69B4"),
("Indigo", "#4B0082"),
("Ngà voi", "#FFFFF0"),
("Rêu", "#F0E68C"),
("Hồng lavender", "#FFF0F5"),
("Xanh dương nhạt", "#ADD8E6"),
("Xanh biển nhạt", "#E0FFFF"),
("Xám sáng", "#D3D3D3"),
("Xanh lá cây sáng", "#90EE90"),
("Hồng sáng", "#FFB6C1"),
("Xanh biển sáng", "#87CEFA"),
("Vàng sáng", "#FFFFE0"),
("Hạt Dẻ", "#800000"),
("Cam", "#FFA500"),
("Cam đỏ", "#FF4500"),
("Xanh lá cây nhạt", "#98FB98"),
("Xanh biển nhạt", "#AFEEEE"),
("Hồng", "#FFC0CB"),
("Tím", "#DDA0DD"),
("Tím đậm", "#800080"),
("Đỏ", "#FF0000"),
("Xanh biển xanh", "#2E8B57"),
("Bạc", "#C0C0C0"),
("Xanh lục bảo", "#40E0D0"),
("Tím violet", "#EE82EE"),
("Trắng", "#FFFFFF"),
("Vàng", "#FFFF00"),
("Xanh lá cây vàng", "#9ACD32"),
)
)
safe_colors = (
"đen",
"đỏ rượu",
"xanh lá cây",
"rêu",
"tím",
"xanh biển",
"xanh chanh",
"xanh dương",
"bạc",
"xám",
"vàng",
"hồng fuchsia",
"trắng",
)
|
Provider
|
python
|
PrefectHQ__prefect
|
src/prefect/cli/transfer/_migratable_resources/automations.py
|
{
"start": 1069,
"end": 9627
}
|
class ____(MigratableResource[Automation]):
_instances: dict[uuid.UUID, Self] = {}
def __init__(self, automation: Automation):
self.source_automation = automation
self.destination_automation: Automation | None = None
self._dependencies: dict[uuid.UUID, MigratableProtocol] = {}
@property
def source_id(self) -> uuid.UUID:
return self.source_automation.id
@property
def destination_id(self) -> uuid.UUID | None:
return self.destination_automation.id if self.destination_automation else None
@classmethod
async def construct(cls, obj: Automation) -> Self:
if obj.id in cls._instances:
return cls._instances[obj.id]
instance = cls(obj)
cls._instances[obj.id] = instance
return instance
@classmethod
async def get_instance(
cls, id: uuid.UUID
) -> "MigratableResource[Automation] | None":
if id in cls._instances:
return cls._instances[id]
return None
async def get_dependencies(self) -> "list[MigratableProtocol]":
if self._dependencies:
return list(self._dependencies.values())
async with get_client() as client:
for action in self.source_automation.actions:
if (
isinstance(action, DeploymentAction)
and action.deployment_id is not None
):
if dependency := await MigratableDeployment.get_instance(
id=action.deployment_id
):
self._dependencies[action.deployment_id] = dependency
else:
deployment = await client.read_deployment(action.deployment_id)
self._dependencies[
deployment.id
] = await construct_migratable_resource(deployment)
elif (
isinstance(action, WorkPoolAction)
and action.work_pool_id is not None
):
# TODO: Find a better way to get a work pool by id
if dependency := await MigratableWorkPool.get_instance(
id=action.work_pool_id
):
self._dependencies[action.work_pool_id] = dependency
else:
work_pool = await client.read_work_pools(
work_pool_filter=WorkPoolFilter(
id=WorkPoolFilterId(any_=[action.work_pool_id])
)
)
if work_pool:
self._dependencies[
work_pool[0].id
] = await construct_migratable_resource(work_pool[0])
elif (
isinstance(action, WorkQueueAction)
and action.work_queue_id is not None
):
if dependency := await MigratableWorkQueue.get_instance(
id=action.work_queue_id
):
self._dependencies[action.work_queue_id] = dependency
else:
work_queue = await client.read_work_queue(action.work_queue_id)
self._dependencies[
work_queue.id
] = await construct_migratable_resource(work_queue)
elif (
isinstance(action, AutomationAction)
and action.automation_id is not None
):
if dependency := await MigratableAutomation.get_instance(
id=action.automation_id
):
self._dependencies[action.automation_id] = dependency
else:
automation = await client.find_automation(action.automation_id)
if automation:
self._dependencies[
automation.id
] = await construct_migratable_resource(automation)
elif isinstance(action, CallWebhook):
if dependency := await MigratableBlockDocument.get_instance(
id=action.block_document_id
):
self._dependencies[action.block_document_id] = dependency
else:
block_document = await client.read_block_document(
action.block_document_id
)
self._dependencies[
block_document.id
] = await construct_migratable_resource(block_document)
elif isinstance(action, SendNotification):
if dependency := await MigratableBlockDocument.get_instance(
id=action.block_document_id
):
self._dependencies[action.block_document_id] = dependency
else:
block_document = await client.read_block_document(
action.block_document_id
)
self._dependencies[
block_document.id
] = await construct_migratable_resource(block_document)
return list(self._dependencies.values())
async def migrate(self) -> None:
async with get_client() as client:
automations = await client.read_automations_by_name(
name=self.source_automation.name
)
if automations:
self.destination_automation = automations[0]
raise TransferSkipped("Already exists")
else:
automation_copy = AutomationCore.model_validate(
self.source_automation.model_dump(mode="json")
)
for action in automation_copy.actions:
if (
isinstance(action, DeploymentAction)
and action.deployment_id is not None
):
action.deployment_id = self._dependencies[
action.deployment_id
].destination_id
elif (
isinstance(action, WorkPoolAction)
and action.work_pool_id is not None
):
action.work_pool_id = self._dependencies[
action.work_pool_id
].destination_id
elif (
isinstance(action, WorkQueueAction)
and action.work_queue_id is not None
):
action.work_queue_id = self._dependencies[
action.work_queue_id
].destination_id
elif (
isinstance(action, AutomationAction)
and action.automation_id is not None
):
action.automation_id = self._dependencies[
action.automation_id
].destination_id
elif isinstance(action, CallWebhook):
if destination_block_document_id := getattr(
self._dependencies.get(action.block_document_id),
"destination_id",
None,
):
action.block_document_id = destination_block_document_id
elif isinstance(action, SendNotification):
if destination_block_document_id := getattr(
self._dependencies.get(action.block_document_id),
"destination_id",
None,
):
action.block_document_id = destination_block_document_id
automation_id = await client.create_automation(
automation=automation_copy
)
self.destination_automation = await client.read_automation(
automation_id=automation_id
)
|
MigratableAutomation
|
python
|
spyder-ide__spyder
|
spyder/plugins/projects/widgets/main_widget.py
|
{
"start": 1944,
"end": 2006
}
|
class ____:
Main = 'main'
|
ProjectExplorerOptionsMenuSections
|
python
|
scrapy__scrapy
|
tests/test_exporters.py
|
{
"start": 714,
"end": 833
}
|
class ____(Item):
name = Field()
age = Field(serializer=custom_serializer)
@dataclasses.dataclass
|
CustomFieldItem
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/layout/scrollable_pane.py
|
{
"start": 616,
"end": 19264
}
|
class ____(Container):
"""
Container widget that exposes a larger virtual screen to its content and
displays it in a vertical scrollbale region.
Typically this is wrapped in a large `HSplit` container. Make sure in that
case to not specify a `height` dimension of the `HSplit`, so that it will
scale according to the content.
.. note::
If you want to display a completion menu for widgets in this
`ScrollablePane`, then it's still a good practice to use a
`FloatContainer` with a `CompletionsMenu` in a `Float` at the top-level
of the layout hierarchy, rather then nesting a `FloatContainer` in this
`ScrollablePane`. (Otherwise, it's possible that the completion menu
is clipped.)
:param content: The content container.
:param scrolloffset: Try to keep the cursor within this distance from the
top/bottom (left/right offset is not used).
:param keep_cursor_visible: When `True`, automatically scroll the pane so
that the cursor (of the focused window) is always visible.
:param keep_focused_window_visible: When `True`, automatically scroll the
pane so that the focused window is visible, or as much visible as
possible if it doesn't completely fit the screen.
:param max_available_height: Always constraint the height to this amount
for performance reasons.
:param width: When given, use this width instead of looking at the children.
:param height: When given, use this height instead of looking at the children.
:param show_scrollbar: When `True` display a scrollbar on the right.
"""
def __init__(
self,
content: Container,
scroll_offsets: ScrollOffsets | None = None,
keep_cursor_visible: FilterOrBool = True,
keep_focused_window_visible: FilterOrBool = True,
max_available_height: int = MAX_AVAILABLE_HEIGHT,
width: AnyDimension = None,
height: AnyDimension = None,
show_scrollbar: FilterOrBool = True,
display_arrows: FilterOrBool = True,
up_arrow_symbol: str = "^",
down_arrow_symbol: str = "v",
) -> None:
self.content = content
self.scroll_offsets = scroll_offsets or ScrollOffsets(top=1, bottom=1)
self.keep_cursor_visible = to_filter(keep_cursor_visible)
self.keep_focused_window_visible = to_filter(keep_focused_window_visible)
self.max_available_height = max_available_height
self.width = width
self.height = height
self.show_scrollbar = to_filter(show_scrollbar)
self.display_arrows = to_filter(display_arrows)
self.up_arrow_symbol = up_arrow_symbol
self.down_arrow_symbol = down_arrow_symbol
self.vertical_scroll = 0
def __repr__(self) -> str:
return f"ScrollablePane({self.content!r})"
def reset(self) -> None:
self.content.reset()
def preferred_width(self, max_available_width: int) -> Dimension:
if self.width is not None:
return to_dimension(self.width)
# We're only scrolling vertical. So the preferred width is equal to
# that of the content.
content_width = self.content.preferred_width(max_available_width)
# If a scrollbar needs to be displayed, add +1 to the content width.
if self.show_scrollbar():
return sum_layout_dimensions([Dimension.exact(1), content_width])
return content_width
def preferred_height(self, width: int, max_available_height: int) -> Dimension:
if self.height is not None:
return to_dimension(self.height)
# Prefer a height large enough so that it fits all the content. If not,
# we'll make the pane scrollable.
if self.show_scrollbar():
# If `show_scrollbar` is set. Always reserve space for the scrollbar.
width -= 1
dimension = self.content.preferred_height(width, self.max_available_height)
# Only take 'preferred' into account. Min/max can be anything.
return Dimension(min=0, preferred=dimension.preferred)
def write_to_screen(
self,
screen: Screen,
mouse_handlers: MouseHandlers,
write_position: WritePosition,
parent_style: str,
erase_bg: bool,
z_index: int | None,
) -> None:
"""
Render scrollable pane content.
This works by rendering on an off-screen canvas, and copying over the
visible region.
"""
show_scrollbar = self.show_scrollbar()
if show_scrollbar:
virtual_width = write_position.width - 1
else:
virtual_width = write_position.width
# Compute preferred height again.
virtual_height = self.content.preferred_height(
virtual_width, self.max_available_height
).preferred
# Ensure virtual height is at least the available height.
virtual_height = max(virtual_height, write_position.height)
virtual_height = min(virtual_height, self.max_available_height)
# First, write the content to a virtual screen, then copy over the
# visible part to the real screen.
temp_screen = Screen(default_char=Char(char=" ", style=parent_style))
temp_screen.show_cursor = screen.show_cursor
temp_write_position = WritePosition(
xpos=0, ypos=0, width=virtual_width, height=virtual_height
)
temp_mouse_handlers = MouseHandlers()
self.content.write_to_screen(
temp_screen,
temp_mouse_handlers,
temp_write_position,
parent_style,
erase_bg,
z_index,
)
temp_screen.draw_all_floats()
# If anything in the virtual screen is focused, move vertical scroll to
from prompt_toolkit.application import get_app
focused_window = get_app().layout.current_window
try:
visible_win_write_pos = temp_screen.visible_windows_to_write_positions[
focused_window
]
except KeyError:
pass # No window focused here. Don't scroll.
else:
# Make sure this window is visible.
self._make_window_visible(
write_position.height,
virtual_height,
visible_win_write_pos,
temp_screen.cursor_positions.get(focused_window),
)
# Copy over virtual screen and zero width escapes to real screen.
self._copy_over_screen(screen, temp_screen, write_position, virtual_width)
# Copy over mouse handlers.
self._copy_over_mouse_handlers(
mouse_handlers, temp_mouse_handlers, write_position, virtual_width
)
# Set screen.width/height.
ypos = write_position.ypos
xpos = write_position.xpos
screen.width = max(screen.width, xpos + virtual_width)
screen.height = max(screen.height, ypos + write_position.height)
# Copy over window write positions.
self._copy_over_write_positions(screen, temp_screen, write_position)
if temp_screen.show_cursor:
screen.show_cursor = True
# Copy over cursor positions, if they are visible.
for window, point in temp_screen.cursor_positions.items():
if (
0 <= point.x < write_position.width
and self.vertical_scroll
<= point.y
< write_position.height + self.vertical_scroll
):
screen.cursor_positions[window] = Point(
x=point.x + xpos, y=point.y + ypos - self.vertical_scroll
)
# Copy over menu positions, but clip them to the visible area.
for window, point in temp_screen.menu_positions.items():
screen.menu_positions[window] = self._clip_point_to_visible_area(
Point(x=point.x + xpos, y=point.y + ypos - self.vertical_scroll),
write_position,
)
# Draw scrollbar.
if show_scrollbar:
self._draw_scrollbar(
write_position,
virtual_height,
screen,
)
def _clip_point_to_visible_area(
self, point: Point, write_position: WritePosition
) -> Point:
"""
Ensure that the cursor and menu positions always are always reported
"""
if point.x < write_position.xpos:
point = point._replace(x=write_position.xpos)
if point.y < write_position.ypos:
point = point._replace(y=write_position.ypos)
if point.x >= write_position.xpos + write_position.width:
point = point._replace(x=write_position.xpos + write_position.width - 1)
if point.y >= write_position.ypos + write_position.height:
point = point._replace(y=write_position.ypos + write_position.height - 1)
return point
def _copy_over_screen(
self,
screen: Screen,
temp_screen: Screen,
write_position: WritePosition,
virtual_width: int,
) -> None:
"""
Copy over visible screen content and "zero width escape sequences".
"""
ypos = write_position.ypos
xpos = write_position.xpos
for y in range(write_position.height):
temp_row = temp_screen.data_buffer[y + self.vertical_scroll]
row = screen.data_buffer[y + ypos]
temp_zero_width_escapes = temp_screen.zero_width_escapes[
y + self.vertical_scroll
]
zero_width_escapes = screen.zero_width_escapes[y + ypos]
for x in range(virtual_width):
row[x + xpos] = temp_row[x]
if x in temp_zero_width_escapes:
zero_width_escapes[x + xpos] = temp_zero_width_escapes[x]
def _copy_over_mouse_handlers(
self,
mouse_handlers: MouseHandlers,
temp_mouse_handlers: MouseHandlers,
write_position: WritePosition,
virtual_width: int,
) -> None:
"""
Copy over mouse handlers from virtual screen to real screen.
Note: we take `virtual_width` because we don't want to copy over mouse
handlers that we possibly have behind the scrollbar.
"""
ypos = write_position.ypos
xpos = write_position.xpos
# Cache mouse handlers when wrapping them. Very often the same mouse
# handler is registered for many positions.
mouse_handler_wrappers: dict[MouseHandler, MouseHandler] = {}
def wrap_mouse_handler(handler: MouseHandler) -> MouseHandler:
"Wrap mouse handler. Translate coordinates in `MouseEvent`."
if handler not in mouse_handler_wrappers:
def new_handler(event: MouseEvent) -> None:
new_event = MouseEvent(
position=Point(
x=event.position.x - xpos,
y=event.position.y + self.vertical_scroll - ypos,
),
event_type=event.event_type,
button=event.button,
modifiers=event.modifiers,
)
handler(new_event)
mouse_handler_wrappers[handler] = new_handler
return mouse_handler_wrappers[handler]
# Copy handlers.
mouse_handlers_dict = mouse_handlers.mouse_handlers
temp_mouse_handlers_dict = temp_mouse_handlers.mouse_handlers
for y in range(write_position.height):
if y in temp_mouse_handlers_dict:
temp_mouse_row = temp_mouse_handlers_dict[y + self.vertical_scroll]
mouse_row = mouse_handlers_dict[y + ypos]
for x in range(virtual_width):
if x in temp_mouse_row:
mouse_row[x + xpos] = wrap_mouse_handler(temp_mouse_row[x])
def _copy_over_write_positions(
self, screen: Screen, temp_screen: Screen, write_position: WritePosition
) -> None:
"""
Copy over window write positions.
"""
ypos = write_position.ypos
xpos = write_position.xpos
for win, write_pos in temp_screen.visible_windows_to_write_positions.items():
screen.visible_windows_to_write_positions[win] = WritePosition(
xpos=write_pos.xpos + xpos,
ypos=write_pos.ypos + ypos - self.vertical_scroll,
# TODO: if the window is only partly visible, then truncate width/height.
# This could be important if we have nested ScrollablePanes.
height=write_pos.height,
width=write_pos.width,
)
def is_modal(self) -> bool:
return self.content.is_modal()
def get_key_bindings(self) -> KeyBindingsBase | None:
return self.content.get_key_bindings()
def get_children(self) -> list[Container]:
return [self.content]
def _make_window_visible(
self,
visible_height: int,
virtual_height: int,
visible_win_write_pos: WritePosition,
cursor_position: Point | None,
) -> None:
"""
Scroll the scrollable pane, so that this window becomes visible.
:param visible_height: Height of this `ScrollablePane` that is rendered.
:param virtual_height: Height of the virtual, temp screen.
:param visible_win_write_pos: `WritePosition` of the nested window on the
temp screen.
:param cursor_position: The location of the cursor position of this
window on the temp screen.
"""
# Start with maximum allowed scroll range, and then reduce according to
# the focused window and cursor position.
min_scroll = 0
max_scroll = virtual_height - visible_height
if self.keep_cursor_visible():
# Reduce min/max scroll according to the cursor in the focused window.
if cursor_position is not None:
offsets = self.scroll_offsets
cpos_min_scroll = (
cursor_position.y - visible_height + 1 + offsets.bottom
)
cpos_max_scroll = cursor_position.y - offsets.top
min_scroll = max(min_scroll, cpos_min_scroll)
max_scroll = max(0, min(max_scroll, cpos_max_scroll))
if self.keep_focused_window_visible():
# Reduce min/max scroll according to focused window position.
# If the window is small enough, bot the top and bottom of the window
# should be visible.
if visible_win_write_pos.height <= visible_height:
window_min_scroll = (
visible_win_write_pos.ypos
+ visible_win_write_pos.height
- visible_height
)
window_max_scroll = visible_win_write_pos.ypos
else:
# Window does not fit on the screen. Make sure at least the whole
# screen is occupied with this window, and nothing else is shown.
window_min_scroll = visible_win_write_pos.ypos
window_max_scroll = (
visible_win_write_pos.ypos
+ visible_win_write_pos.height
- visible_height
)
min_scroll = max(min_scroll, window_min_scroll)
max_scroll = min(max_scroll, window_max_scroll)
if min_scroll > max_scroll:
min_scroll = max_scroll # Should not happen.
# Finally, properly clip the vertical scroll.
if self.vertical_scroll > max_scroll:
self.vertical_scroll = max_scroll
if self.vertical_scroll < min_scroll:
self.vertical_scroll = min_scroll
def _draw_scrollbar(
self, write_position: WritePosition, content_height: int, screen: Screen
) -> None:
"""
Draw the scrollbar on the screen.
Note: There is some code duplication with the `ScrollbarMargin`
implementation.
"""
window_height = write_position.height
display_arrows = self.display_arrows()
if display_arrows:
window_height -= 2
try:
fraction_visible = write_position.height / float(content_height)
fraction_above = self.vertical_scroll / float(content_height)
scrollbar_height = int(
min(window_height, max(1, window_height * fraction_visible))
)
scrollbar_top = int(window_height * fraction_above)
except ZeroDivisionError:
return
else:
def is_scroll_button(row: int) -> bool:
"True if we should display a button on this row."
return scrollbar_top <= row <= scrollbar_top + scrollbar_height
xpos = write_position.xpos + write_position.width - 1
ypos = write_position.ypos
data_buffer = screen.data_buffer
# Up arrow.
if display_arrows:
data_buffer[ypos][xpos] = Char(
self.up_arrow_symbol, "class:scrollbar.arrow"
)
ypos += 1
# Scrollbar body.
scrollbar_background = "class:scrollbar.background"
scrollbar_background_start = "class:scrollbar.background,scrollbar.start"
scrollbar_button = "class:scrollbar.button"
scrollbar_button_end = "class:scrollbar.button,scrollbar.end"
for i in range(window_height):
style = ""
if is_scroll_button(i):
if not is_scroll_button(i + 1):
# Give the last cell a different style, because we want
# to underline this.
style = scrollbar_button_end
else:
style = scrollbar_button
else:
if is_scroll_button(i + 1):
style = scrollbar_background_start
else:
style = scrollbar_background
data_buffer[ypos][xpos] = Char(" ", style)
ypos += 1
# Down arrow
if display_arrows:
data_buffer[ypos][xpos] = Char(
self.down_arrow_symbol, "class:scrollbar.arrow"
)
|
ScrollablePane
|
python
|
graphql-python__graphene
|
examples/starwars_relay/schema.py
|
{
"start": 123,
"end": 393
}
|
class ____(graphene.ObjectType):
"""A ship in the Star Wars saga"""
class Meta:
interfaces = (relay.Node,)
name = graphene.String(description="The name of the ship.")
@classmethod
def get_node(cls, info, id):
return get_ship(id)
|
Ship
|
python
|
mlflow__mlflow
|
mlflow/entities/trace_location.py
|
{
"start": 716,
"end": 1455
}
|
class ____(TraceLocationBase):
"""
Represents the location of an MLflow experiment.
Args:
experiment_id: The ID of the MLflow experiment where the trace is stored.
"""
experiment_id: str
def to_proto(self):
return pb.TraceLocation.MlflowExperimentLocation(experiment_id=self.experiment_id)
@classmethod
def from_proto(cls, proto) -> "MlflowExperimentLocation":
return cls(experiment_id=proto.experiment_id)
def to_dict(self) -> dict[str, Any]:
return {"experiment_id": self.experiment_id}
@classmethod
def from_dict(cls, d: dict[str, Any]) -> "MlflowExperimentLocation":
return cls(experiment_id=d["experiment_id"])
@dataclass
|
MlflowExperimentLocation
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/layout/margins.py
|
{
"start": 8208,
"end": 10375
}
|
class ____(Margin):
"""
[Deprecated]
Create margin that displays a prompt.
This can display one prompt at the first line, and a continuation prompt
(e.g, just dots) on all the following lines.
This `PromptMargin` implementation has been largely superseded in favor of
the `get_line_prefix` attribute of `Window`. The reason is that a margin is
always a fixed width, while `get_line_prefix` can return a variable width
prefix in front of every line, making it more powerful, especially for line
continuations.
:param get_prompt: Callable returns formatted text or a list of
`(style_str, type)` tuples to be shown as the prompt at the first line.
:param get_continuation: Callable that takes three inputs. The width (int),
line_number (int), and is_soft_wrap (bool). It should return formatted
text or a list of `(style_str, type)` tuples for the next lines of the
input.
"""
def __init__(
self,
get_prompt: Callable[[], StyleAndTextTuples],
get_continuation: None
| (Callable[[int, int, bool], StyleAndTextTuples]) = None,
) -> None:
self.get_prompt = get_prompt
self.get_continuation = get_continuation
def get_width(self, get_ui_content: Callable[[], UIContent]) -> int:
"Width to report to the `Window`."
# Take the width from the first line.
text = fragment_list_to_text(self.get_prompt())
return get_cwidth(text)
def create_margin(
self, window_render_info: WindowRenderInfo, width: int, height: int
) -> StyleAndTextTuples:
get_continuation = self.get_continuation
result: StyleAndTextTuples = []
# First line.
result.extend(to_formatted_text(self.get_prompt()))
# Next lines.
if get_continuation:
last_y = None
for y in window_render_info.displayed_lines[1:]:
result.append(("", "\n"))
result.extend(
to_formatted_text(get_continuation(width, y, y == last_y))
)
last_y = y
return result
|
PromptMargin
|
python
|
sqlalchemy__sqlalchemy
|
examples/space_invaders/space_invaders.py
|
{
"start": 6627,
"end": 6740
}
|
class ____(Glyph):
"""Describe an enemy."""
__mapper_args__ = {"polymorphic_identity": "enemy"}
|
EnemyGlyph
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/langchain/agents/structured_output.py
|
{
"start": 8893,
"end": 10678
}
|
class ____(Generic[SchemaT]):
"""Information for tracking structured output tool metadata.
This contains all necessary information to handle structured responses
generated via tool calls, including the original schema, its type classification,
and the corresponding tool implementation used by the tools strategy.
"""
schema: type[SchemaT]
"""The original schema provided for structured output
(Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
schema_kind: SchemaKind
"""Classification of the schema type for proper response construction."""
tool: BaseTool
"""LangChain tool instance created from the schema for model binding."""
@classmethod
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
"""Create an `OutputToolBinding` instance from a `SchemaSpec`.
Args:
schema_spec: The `SchemaSpec` to convert
Returns:
An `OutputToolBinding` instance with the appropriate tool created
"""
return cls(
schema=schema_spec.schema,
schema_kind=schema_spec.schema_kind,
tool=StructuredTool(
args_schema=schema_spec.json_schema,
name=schema_spec.name,
description=schema_spec.description,
),
)
def parse(self, tool_args: dict[str, Any]) -> SchemaT:
"""Parse tool arguments according to the schema.
Args:
tool_args: The arguments from the tool call
Returns:
The parsed response according to the schema type
Raises:
ValueError: If parsing fails
"""
return _parse_with_schema(self.schema, self.schema_kind, tool_args)
@dataclass
|
OutputToolBinding
|
python
|
celery__celery
|
celery/concurrency/eventlet.py
|
{
"start": 2302,
"end": 5126
}
|
class ____(base.BasePool):
"""Eventlet Task Pool."""
Timer = Timer
signal_safe = False
is_green = True
task_join_will_block = False
_pool = None
_pool_map = None
_quick_put = None
def __init__(self, *args, **kwargs):
from eventlet import greenthread
from eventlet.greenpool import GreenPool
self.Pool = GreenPool
self.getcurrent = greenthread.getcurrent
self.getpid = lambda: id(greenthread.getcurrent())
self.spawn_n = greenthread.spawn_n
super().__init__(*args, **kwargs)
def on_start(self):
self._pool = self.Pool(self.limit)
self._pool_map = {}
signals.eventlet_pool_started.send(sender=self)
self._quick_put = self._pool.spawn
self._quick_apply_sig = signals.eventlet_pool_apply.send
def on_stop(self):
signals.eventlet_pool_preshutdown.send(sender=self)
if self._pool is not None:
self._pool.waitall()
signals.eventlet_pool_postshutdown.send(sender=self)
def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, **_):
target = TaskPool._make_killable_target(target)
self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,)
greenlet = self._quick_put(
apply_target,
target, args,
kwargs,
callback,
accept_callback,
self.getpid
)
self._add_to_pool_map(id(greenlet), greenlet)
def grow(self, n=1):
limit = self.limit + n
self._pool.resize(limit)
self.limit = limit
def shrink(self, n=1):
limit = self.limit - n
self._pool.resize(limit)
self.limit = limit
def terminate_job(self, pid, signal=None):
if pid in self._pool_map.keys():
greenlet = self._pool_map[pid]
greenlet.kill()
greenlet.wait()
def _get_info(self):
info = super()._get_info()
info.update({
'max-concurrency': self.limit,
'free-threads': self._pool.free(),
'running-threads': self._pool.running(),
})
return info
@staticmethod
def _make_killable_target(target):
def killable_target(*args, **kwargs):
try:
return target(*args, **kwargs)
except GreenletExit:
return (False, None, None)
return killable_target
def _add_to_pool_map(self, pid, greenlet):
self._pool_map[pid] = greenlet
greenlet.link(
TaskPool._cleanup_after_job_finish,
self._pool_map,
pid
)
@staticmethod
def _cleanup_after_job_finish(greenlet, pool_map, pid):
del pool_map[pid]
|
TaskPool
|
python
|
python-visualization__folium
|
folium/folium.py
|
{
"start": 2324,
"end": 17564
}
|
class ____(JSCSSMixin, Evented):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. Folium has built-in all tilesets
available in the ``xyzservices`` package. For example, you can pass
any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "CartoDB Positron"
- "CartoDB Voyager"
Explore more provider names available in ``xyzservices`` here:
https://leaflet-extras.github.io/leaflet-providers/preview/.
You can also pass a custom tileset by passing a
:class:`xyzservices.TileProvider` or a Leaflet-style
URL to the tiles parameter: ``https://{s}.yourtiles.com/{z}/{x}/{y}.png``.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str or TileLayer or :class:`xyzservices.TileProvider`, default 'OpenStreetMap'
Map tileset to use. Can choose from a list of built-in tiles,
pass a :class:`xyzservices.TileProvider`,
pass a custom URL, pass a TileLayer object,
or pass `None` to create a map without tiles.
For more advanced tile layer options, use the `TileLayer` class.
min_zoom: int, optional, default 0
Minimum allowed zoom level for the tile layer that is created.
Filled by xyzservices by default.
max_zoom: int, optional, default 18
Maximum allowed zoom level for the tile layer that is created.
Filled by xyzservices by default.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
crs : str, default 'EPSG3857'
Defines coordinate reference systems for projecting geographical points
into pixel (screen) coordinates and back.
You can use Leaflet's values :
* EPSG3857 : The most common CRS for online maps, used by almost all
free and commercial tile providers. Uses Spherical Mercator projection.
Set in by default in Map's crs option.
* EPSG4326 : A common CRS among GIS enthusiasts.
Uses simple Equirectangular projection.
* EPSG3395 : Rarely used by some commercial tile providers.
Uses Elliptical Mercator projection.
* Simple : A simple CRS that maps longitude and latitude into
x and y directly. May be used for maps of flat surfaces
(e.g. game maps). Note that the y axis should still be inverted
(going from bottom to top).
control_scale : bool, default False
Whether to add a control scale on the map.
prefer_canvas : bool, default False
Forces Leaflet to use the Canvas back-end (if available) for
vector layers instead of SVG. This can increase performance
considerably in some cases (e.g. many thousands of circle
markers on the map).
no_touch : bool, default False
Forces Leaflet to not use touch events even if it detects them.
disable_3d : bool, default False
Forces Leaflet to not use hardware-accelerated CSS 3D
transforms for positioning (which may cause glitches in some
rare environments) even if they're supported.
zoom_control : bool or position string, default True
Display zoom controls on the map. The default `True` places it in the top left corner.
Other options are 'topleft', 'topright', 'bottomleft' or 'bottomright'.
font_size : int or float or string (default: '1rem')
The font size to use for Leaflet, can either be a number or a
string ending in 'rem', 'em', or 'px'.
**kwargs
Additional keyword arguments are passed to Leaflets Map class:
https://leafletjs.com/reference.html#map
Returns
-------
Folium Map Object
Examples
--------
>>> m = folium.Map(location=[45.523, -122.675], width=750, height=500)
>>> m = folium.Map(location=[45.523, -122.675], tiles="cartodb positron")
>>> m = folium.Map(
... location=[45.523, -122.675],
... zoom_start=2,
... tiles="https://api.mapbox.com/v4/mapbox.streets/{z}/{x}/{y}.png?access_token=mytoken",
... attr="Mapbox attribution",
... )
""" # noqa
_template = Template(
"""
{% macro header(this, kwargs) %}
<meta name="viewport" content="width=device-width,
initial-scale=1.0, maximum-scale=1.0, user-scalable=no" />
<style>
#{{ this.get_name() }} {
position: {{this.position}};
width: {{this.width[0]}}{{this.width[1]}};
height: {{this.height[0]}}{{this.height[1]}};
left: {{this.left[0]}}{{this.left[1]}};
top: {{this.top[0]}}{{this.top[1]}};
}
.leaflet-container { font-size: {{this.font_size}}; }
</style>
<style>html, body {
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
</style>
<style>#map {
position:absolute;
top:0;
bottom:0;
right:0;
left:0;
}
</style>
<script>
L_NO_TOUCH = {{ this.global_switches.no_touch |tojson}};
L_DISABLE_3D = {{ this.global_switches.disable_3d|tojson }};
</script>
{% endmacro %}
{% macro html(this, kwargs) %}
<div class="folium-map" id={{ this.get_name()|tojson }} ></div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.map(
{{ this.get_name()|tojson }},
{
center: {{ this.location|tojson }},
crs: L.CRS.{{ this.crs }},
...{{this.options|tojavascript}}
}
);
{%- if this.control_scale %}
L.control.scale().addTo({{ this.get_name() }});
{%- endif %}
{%- if this.zoom_control_position %}
L.control.zoom( { position: {{ this.zoom_control|tojson }} } ).addTo({{ this.get_name() }});
{%- endif %}
{% if this.objects_to_stay_in_front %}
function objects_in_front() {
{%- for obj in this.objects_to_stay_in_front %}
{{ obj.get_name() }}.bringToFront();
{%- endfor %}
};
{{ this.get_name() }}.on("overlayadd", objects_in_front);
$(document).ready(objects_in_front);
{%- endif %}
{% endmacro %}
"""
)
# use the module variables for backwards compatibility
default_js = _default_js
default_css = _default_css
def __init__(
self,
location: Optional[Sequence[float]] = None,
width: Union[str, float] = "100%",
height: Union[str, float] = "100%",
left: Union[str, float] = "0%",
top: Union[str, float] = "0%",
position: str = "relative",
tiles: Union[str, TileLayer, None] = "OpenStreetMap",
attr: Optional[str] = None,
min_zoom: Optional[int] = None,
max_zoom: Optional[int] = None,
zoom_start: int = 10,
min_lat: float = -90,
max_lat: float = 90,
min_lon: float = -180,
max_lon: float = 180,
max_bounds: bool = False,
crs: str = "EPSG3857",
control_scale: bool = False,
prefer_canvas: bool = False,
no_touch: bool = False,
disable_3d: bool = False,
png_enabled: bool = False,
zoom_control: Union[bool, str] = True,
font_size: str = "1rem",
**kwargs: TypeJsonValue,
):
super().__init__()
self._name = "Map"
self._png_image: Optional[bytes] = None
self.png_enabled = png_enabled
if location is None:
# If location is not passed we center and zoom out.
self.location = [0.0, 0.0]
zoom_start = 1
else:
self.location = validate_location(location)
Figure().add_child(self)
# Map Size Parameters.
self.width = _parse_size(width)
self.height = _parse_size(height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self.font_size = parse_font_size(font_size)
max_bounds_array = (
[[min_lat, min_lon], [max_lat, max_lon]] if max_bounds else None
)
self.crs = crs
self.control_scale = control_scale
# Zoom control position specified ?
if isinstance(zoom_control, str):
self.zoom_control_position = True
if zoom_control not in {"topleft", "topright", "bottomleft", "bottomright"}:
raise ValueError(
"Incorrect value for `zoom_control`, choose from 'topleft', 'topright', 'bottomleft' or 'bottomright'."
)
self.zoom_control = zoom_control
else:
self.zoom_control_position = False
self.global_switches = GlobalSwitches(no_touch, disable_3d)
self.options = remove_empty(
max_bounds=max_bounds_array,
zoom=zoom_start,
zoom_control=False if self.zoom_control_position else zoom_control,
prefer_canvas=prefer_canvas,
**kwargs,
)
self.objects_to_stay_in_front: list[Layer] = []
if isinstance(tiles, TileLayer):
self.add_child(tiles)
elif tiles:
tile_layer = TileLayer(
tiles=tiles, attr=attr, min_zoom=min_zoom, max_zoom=max_zoom
)
self.add_child(tile_layer, name=tile_layer.tile_name)
def _repr_html_(self, **kwargs) -> str:
"""Displays the HTML Map in a Jupyter notebook."""
if self._parent is None:
self.add_to(Figure())
self._parent: Figure
out = self._parent._repr_html_(**kwargs)
self._parent = None
else:
out = self._parent._repr_html_(**kwargs)
return out
def _to_png(
self, delay: int = 3, driver: Any = None, size: Optional[Sequence[int]] = None
) -> bytes:
"""Export the HTML to byte representation of a PNG image.
Uses selenium to render the HTML and record a PNG. You may need to
adjust the `delay` time keyword argument if maps render without data or tiles.
Uses a headless Firefox webdriver by default, though you can provide your own.
Examples
--------
>>> m._to_png()
>>> m._to_png(time=10) # Wait 10 seconds between render and snapshot.
"""
if self._png_image is None:
if driver is None:
from selenium import webdriver
options = webdriver.firefox.options.Options()
options.add_argument("--headless")
driver = webdriver.Firefox(options=options)
if size is None:
driver.fullscreen_window()
else:
window_size = driver.execute_script(
"""
return [window.outerWidth - window.innerWidth + arguments[0],
window.outerHeight - window.innerHeight + arguments[1]];
""",
*size,
)
driver.set_window_size(*window_size)
html = self.get_root().render()
with temp_html_filepath(html) as fname:
# We need the tempfile to avoid JS security issues.
driver.get(f"file:///{fname}")
time.sleep(delay)
div = driver.find_element("class name", "folium-map")
png = div.screenshot_as_png
driver.quit()
self._png_image = png
return self._png_image
def _repr_png_(self) -> Optional[bytes]:
"""Displays the PNG Map in a Jupyter notebook."""
# The notebook calls all _repr_*_ by default.
# We don't want that here b/c this one is quite slow.
if not self.png_enabled:
return None
return self._to_png()
def show_in_browser(self) -> None:
"""Display the Map in the default web browser."""
with temp_html_filepath(self.get_root().render()) as fname:
webbrowser.open("file://" + fname)
print(
"Your map should have been opened in your browser automatically."
"\nPress ctrl+c to return."
)
# Block until stopped by user, afterwards remove the temporary file
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
def fit_bounds(
self,
bounds: TypeBounds,
padding_top_left: Optional[Sequence[float]] = None,
padding_bottom_right: Optional[Sequence[float]] = None,
padding: Optional[Sequence[float]] = None,
max_zoom: Optional[int] = None,
) -> None:
"""Fit the map to contain a bounding box with the
maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
Examples
--------
>>> m.fit_bounds([[52.193636, -2.221575], [52.636878, -1.139759]])
"""
self.add_child(
FitBounds(
bounds,
padding_top_left=padding_top_left,
padding_bottom_right=padding_bottom_right,
padding=padding,
max_zoom=max_zoom,
)
)
def keep_in_front(self, *args: Layer) -> None:
"""Pass one or multiple layers that must stay in front.
The ordering matters, the last one is put on top.
Parameters
----------
*args :
Variable length argument list. Any folium object that counts as an
overlay. For example FeatureGroup or TileLayer.
Does not work with markers, for those use z_index_offset.
"""
for obj in args:
self.objects_to_stay_in_front.append(obj)
|
Map
|
python
|
realpython__materials
|
tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/logic/exceptions.py
|
{
"start": 161,
"end": 245
}
|
class ____(Exception):
"""Raised when the game score is unknown."""
|
UnknownGameScore
|
python
|
fluentpython__example-code
|
attic/metaprog/plainpoint.py
|
{
"start": 173,
"end": 688
}
|
class ____(object):
__slots__ = ['x', 'y'] # save memory in the likely event there are many instances
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return 'Point({!r}, {!r})'.format(self.x, self.y)
def __eq__(self, other):
if not isinstance(other, Point):
return NotImplemented
return self.x == other.x and self.y == other.y
def __iter__(self, other): # support unpacking
yield self.x
yield self.y
|
Point
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/handlers/condition/test_event_frequency_handlers.py
|
{
"start": 13623,
"end": 18789
}
|
class ____(ConditionTestCase):
def setUp(self) -> None:
super().setUp()
self.condition = Condition.EVENT_UNIQUE_USER_FREQUENCY_COUNT
self.payload: dict[str, str | int | float] = {
"interval": "1h",
"id": EventUniqueUserFrequencyConditionWithConditions.id,
"value": 50,
"comparisonType": ComparisonType.COUNT,
}
self.conditions = [
{
"id": TaggedEventFilter.id,
"match": MatchType.EQUAL,
"key": "LOGGER",
"value": "sentry.example",
},
{
"id": TaggedEventFilter.id,
"match": MatchType.IS_SET,
"key": "environment",
},
{
"id": EventAttributeFilter.id,
"match": MatchType.EQUAL,
"value": "hi",
"attribute": "message",
},
]
self.expected_filters = [
{
"match": MatchType.EQUAL,
"key": self.conditions[0]["key"],
"value": self.conditions[0]["value"],
},
{"match": MatchType.IS_SET, "key": self.conditions[1]["key"]},
{
"match": MatchType.EQUAL,
"attribute": self.conditions[2]["attribute"],
"value": self.conditions[2]["value"],
},
]
self.dcg = self.create_data_condition_group()
def _test_dual_write_count(self, value):
dc = create_event_unique_user_frequency_condition_with_conditions(
self.payload, self.dcg, self.conditions
)
assert dc.type == self.condition
assert dc.comparison == {
"interval": self.payload["interval"],
"value": self.payload["value"],
"filters": self.expected_filters,
}
assert dc.condition_result is True
assert dc.condition_group == self.dcg
def test_dual_write_count(self) -> None:
self._test_dual_write_count(self.payload["value"])
def test_dual_write_count__string_value(self) -> None:
self._test_dual_write_count(str(self.payload["value"]))
def test_dual_write_count__value_floor(self) -> None:
# forces negative to zero for migration
self.payload["value"] = 0 # expected
self._test_dual_write_count(-1)
def _test_dual_write_percent(self, value):
self.payload.update({"comparisonType": ComparisonType.PERCENT, "comparisonInterval": "1d"})
dc = create_event_unique_user_frequency_condition_with_conditions(
self.payload, self.dcg, self.conditions
)
assert dc.type == Condition.EVENT_UNIQUE_USER_FREQUENCY_PERCENT
assert dc.comparison == {
"interval": self.payload["interval"],
"value": self.payload["value"],
"comparison_interval": self.payload["comparisonInterval"],
"filters": self.expected_filters,
}
assert dc.condition_result is True
assert dc.condition_group == self.dcg
def test_dual_write_percent(self) -> None:
self._test_dual_write_percent(self.payload["value"])
def test_dual_write_percent__string_value(self) -> None:
self._test_dual_write_percent(str(self.payload["value"]))
def test_dual_write_count__percent_floor(self) -> None:
# forces negative to zero for migration
self.payload["value"] = 0 # expected
self._test_dual_write_percent(-1)
def test_dual_write__invalid(self) -> None:
with pytest.raises(KeyError):
create_event_unique_user_frequency_condition_with_conditions(
self.payload,
self.dcg,
[
{
"id": EventAttributeFilter.id,
"match": MatchType.EQUAL,
"value": "hi",
},
],
)
with pytest.raises(ValueError): # unsupported filter condition
create_event_unique_user_frequency_condition_with_conditions(
self.payload,
self.dcg,
[
{
"id": FirstSeenEventCondition.id,
},
],
)
def test_json_schema(self) -> None:
with pytest.raises(ValidationError):
self.create_data_condition(
type=self.condition,
comparison={
"interval": "asdf",
"value": "100",
"filters": "asdf",
},
condition_result=True,
)
with pytest.raises(ValidationError):
self.create_data_condition(
type=self.condition,
comparison={
"interval": "1d",
"value": "100",
"filters": [{"interval": "1d", "value": "100"}],
},
condition_result=True,
)
|
TestEventUniqueUserFrequencyConditionWithConditions
|
python
|
geekcomputers__Python
|
Grocery calculator.py
|
{
"start": 502,
"end": 1573
}
|
class ____(dict):
def __init__(self):
self = {}
def addToList(self, item, price):
self.update({item: price})
def Total(self):
total = 0
for items in self:
total += (self[items]) * 0.07 + (self[items])
return total
def Subtotal(self):
subtotal = 0
for items in self:
subtotal += self[items]
return subtotal
def returnList(self):
return self
"""Test list should return:
Total = 10.70
Subtotal = 10
returnList = {"milk":4, "eggs":3, "kombucha":3}
"""
List1 = GroceryList()
List1.addToList("milk", 4)
List1.addToList("eggs", 3)
List1.addToList("kombucha", 3)
print(List1.Total())
print(List1.Subtotal())
print(List1.returnList())
# *****************************************************
print()
# *****************************************************
List2 = GroceryList()
List2.addToList("cheese", 7.49)
List2.addToList("wine", 25.36)
List2.addToList("steak", 17.64)
print(List2.Total())
print(List2.Subtotal())
print(List2.returnList())
|
GroceryList
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/trainer/test_trainer.py
|
{
"start": 47073,
"end": 47364
}
|
class ____(LightningDataModule):
def __init__(self, dataloaders):
super().__init__()
self._dataloaders = dataloaders
def test_dataloader(self):
return self._dataloaders
def predict_dataloader(self):
return self._dataloaders
|
TestLightningDataModule
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/live-tests/src/live_tests/commons/backends/base_backend.py
|
{
"start": 236,
"end": 454
}
|
class ____(ABC):
"""
Interface to be shared between the file backend and the database backend(s)
"""
@abstractmethod
def write(self, airbyte_messages: Iterable[AirbyteMessage]) -> None: ...
|
BaseBackend
|
python
|
openai__openai-python
|
src/openai/types/audio/transcription.py
|
{
"start": 630,
"end": 872
}
|
class ____(BaseModel):
audio_tokens: Optional[int] = None
"""Number of audio tokens billed for this request."""
text_tokens: Optional[int] = None
"""Number of text tokens billed for this request."""
|
UsageTokensInputTokenDetails
|
python
|
numba__numba
|
numba/tests/test_types.py
|
{
"start": 11472,
"end": 16048
}
|
class ____(TestCase):
def test_properties(self):
def check(ty, dtypes, ndim, layout, indexers=None):
self.assertEqual(ty.ndim, ndim)
self.assertEqual(ty.layout, layout)
self.assertEqual(ty.dtypes, dtypes)
views = [types.Array(dtype, 0, "C") for dtype in dtypes]
if len(views) > 1:
self.assertEqual(
ty.yield_type,
types.BaseTuple.from_types(views))
else:
self.assertEqual(ty.yield_type, views[0])
if indexers is not None:
self.assertEqual(ty.indexers, indexers)
f32 = types.float32
c64 = types.complex64
i16 = types.int16
a = types.Array(f32, 1, "C")
b = types.Array(f32, 2, "C")
c = types.Array(c64, 2, "F")
d = types.Array(i16, 2, "A")
e = types.Array(i16, 0, "C")
f = types.Array(f32, 1, "A")
g = types.Array(f32, 0, "C")
# 0-dim iterator
ty = types.NumpyNdIterType((e,))
check(ty, (i16,), 0, "C", [('0d', 0, 0, [0])])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((e, g))
check(ty, (i16, f32), 0, "C", [('0d', 0, 0, [0, 1])])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((e, c64))
check(ty, (i16, c64), 0, "C",
[('0d', 0, 0, [0]), ('scalar', 0, 0, [1])])
self.assertFalse(ty.need_shaped_indexing)
# 1-dim iterator
ty = types.NumpyNdIterType((a,))
check(ty, (f32,), 1, "C",
[('flat', 0, 1, [0])])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((a, a))
check(ty, (f32, f32), 1, "C",
[('flat', 0, 1, [0, 1])])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((a, e, e, c64))
check(ty, (f32, i16, i16, c64), 1, "C",
[('flat', 0, 1, [0]), # a
('0d', 0, 0, [1, 2]), # e, e
('scalar', 0, 0, [3]), # c64
])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((a, f))
check(ty, (f32, f32), 1, "C",
[('flat', 0, 1, [0]), ('indexed', 0, 1, [1])])
self.assertTrue(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((f,))
check(ty, (f32,), 1, "C", [('indexed', 0, 1, [0])])
self.assertTrue(ty.need_shaped_indexing)
# 2-dim C-order iterator
ty = types.NumpyNdIterType((b,))
check(ty, (f32,), 2, "C", [('flat', 0, 2, [0])])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((b, c))
check(
ty, (f32, c64), 2, "C", [
('flat', 0, 2, [0]), ('indexed', 0, 2, [1])])
self.assertTrue(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((d,))
check(ty, (i16,), 2, "C", [('indexed', 0, 2, [0])])
self.assertTrue(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((b, c, d, d, e))
check(ty, (f32, c64, i16, i16, i16), 2, "C",
[('flat', 0, 2, [0]), # b
('indexed', 0, 2, [1, 2, 3]), # c, d, d
('0d', 0, 0, [4]), # e
])
self.assertTrue(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((a, b, c, d, d, f))
check(ty, (f32, f32, c64, i16, i16, f32), 2, "C",
[('flat', 1, 2, [0]), # a
('flat', 0, 2, [1]), # b
('indexed', 0, 2, [2, 3, 4]), # c, d, d
('indexed', 1, 2, [5]), # f
])
self.assertTrue(ty.need_shaped_indexing)
# 2-dim F-order iterator
ty = types.NumpyNdIterType((c,))
check(ty, (c64,), 2, "F", [('flat', 0, 2, [0])])
self.assertFalse(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((c, b, c, f))
check(ty, (c64, f32, c64, f32), 2, "F",
[('flat', 0, 2, [0, 2]), # c, c
('indexed', 0, 2, [1]), # b
('indexed', 0, 1, [3]), # f
])
self.assertTrue(ty.need_shaped_indexing)
ty = types.NumpyNdIterType((b, c, c, d, d, a, e))
check(ty, (f32, c64, c64, i16, i16, f32, i16), 2, "F",
[('indexed', 0, 2, [0, 3, 4]), # b, d, d
('flat', 0, 2, [1, 2]), # c, c
('flat', 0, 1, [5]), # a
('0d', 0, 0, [6]), # e
])
self.assertTrue(ty.need_shaped_indexing)
|
TestNdIter
|
python
|
huggingface__transformers
|
src/transformers/models/ibert/modeling_ibert.py
|
{
"start": 6353,
"end": 12165
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.quant_mode = config.quant_mode
self.weight_bit = 8
self.bias_bit = 32
self.act_bit = 8
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# Q, K, V Linear layers
self.query = QuantLinear(
config.hidden_size,
self.all_head_size,
bias=True,
weight_bit=self.weight_bit,
bias_bit=self.bias_bit,
quant_mode=self.quant_mode,
per_channel=True,
)
self.key = QuantLinear(
config.hidden_size,
self.all_head_size,
bias=True,
weight_bit=self.weight_bit,
bias_bit=self.bias_bit,
quant_mode=self.quant_mode,
per_channel=True,
)
self.value = QuantLinear(
config.hidden_size,
self.all_head_size,
bias=True,
weight_bit=self.weight_bit,
bias_bit=self.bias_bit,
quant_mode=self.quant_mode,
per_channel=True,
)
# Requantization (32bit -> 8bit) for Q, K, V activations
self.query_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.key_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.value_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.softmax = IntSoftmax(self.act_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant)
def forward(
self,
hidden_states,
hidden_states_scaling_factor,
attention_mask=None,
output_attentions=False,
):
# Projection
mixed_query_layer, mixed_query_layer_scaling_factor = self.query(hidden_states, hidden_states_scaling_factor)
mixed_key_layer, mixed_key_layer_scaling_factor = self.key(hidden_states, hidden_states_scaling_factor)
mixed_value_layer, mixed_value_layer_scaling_factor = self.value(hidden_states, hidden_states_scaling_factor)
# Requantization
query_layer, query_layer_scaling_factor = self.query_activation(
mixed_query_layer, mixed_query_layer_scaling_factor
)
key_layer, key_layer_scaling_factor = self.key_activation(mixed_key_layer, mixed_key_layer_scaling_factor)
value_layer, value_layer_scaling_factor = self.value_activation(
mixed_value_layer, mixed_value_layer_scaling_factor
)
# Transpose
batch_size, seq_length, _ = hidden_states.shape
query_layer = query_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(
1, 2
)
key_layer = key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = value_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(
1, 2
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
scale = math.sqrt(self.attention_head_size)
attention_scores = attention_scores / scale
if self.quant_mode:
attention_scores_scaling_factor = query_layer_scaling_factor * key_layer_scaling_factor / scale
else:
attention_scores_scaling_factor = None
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in IBertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs, attention_probs_scaling_factor = self.softmax(
attention_scores, attention_scores_scaling_factor
)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
if attention_probs_scaling_factor is not None:
context_layer_scaling_factor = attention_probs_scaling_factor * value_layer_scaling_factor
else:
context_layer_scaling_factor = None
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
# requantization: 32-bit -> 8-bit
context_layer, context_layer_scaling_factor = self.output_activation(
context_layer, context_layer_scaling_factor
)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
output_scaling_factor = (
(context_layer_scaling_factor, attention_probs_scaling_factor)
if output_attentions
else (context_layer_scaling_factor,)
)
return outputs, output_scaling_factor
|
IBertSelfAttention
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/test.py
|
{
"start": 3499,
"end": 5497
}
|
class ____(TestResult):
"""Test timeout."""
def __init__(self, timeout_duration: int | float) -> None:
super().__init__(command='timeout', test='')
self.timeout_duration = timeout_duration
def write(self, args: TestConfig) -> None:
"""Write the test results to various locations."""
message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
output = """
One or more of the following situations may be responsible:
- Code changes have resulted in tests that hang or run for an excessive amount of time.
- Tests have been added which exceed the time limit when combined with existing tests.
- Test infrastructure and/or external dependencies are operating slower than normal."""
if args.coverage:
output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
output += '\n\nConsult the console log for additional details on where the timeout occurred.'
suites = junit_xml.TestSuites(
suites=[
junit_xml.TestSuite(
name='ansible-test',
timestamp=datetime.datetime.now(tz=datetime.timezone.utc),
cases=[
junit_xml.TestCase(
name='timeout',
classname='timeout',
errors=[
junit_xml.TestError(
message=message,
),
],
),
],
)
],
)
report = suites.to_pretty_xml()
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
|
TestTimeout
|
python
|
charliermarsh__ruff
|
python/ruff-ecosystem/ruff_ecosystem/types.py
|
{
"start": 285,
"end": 630
}
|
class ____(abc.ABC):
"""
Allows serialization of content by casting to a JSON-compatible type.
"""
def jsonable(self) -> Any:
# Default implementation for dataclasses
if is_dataclass(self) and not isinstance(self, type):
return dataclasses.asdict(self)
raise NotImplementedError()
|
Serializable
|
python
|
kamyu104__LeetCode-Solutions
|
Python/similar-string-groups.py
|
{
"start": 654,
"end": 1981
}
|
class ____(object):
def numSimilarGroups(self, A):
def isSimilar(a, b):
diff = 0
for x, y in itertools.izip(a, b):
if x != y:
diff += 1
if diff > 2:
return False
return diff == 2
N, L = len(A), len(A[0])
union_find = UnionFind(N)
if N < L*L:
for (i1, word1), (i2, word2) in \
itertools.combinations(enumerate(A), 2):
if isSimilar(word1, word2):
union_find.union_set(i1, i2)
else:
buckets = collections.defaultdict(list)
lookup = set()
for i in xrange(len(A)):
word = list(A[i])
if A[i] not in lookup:
buckets[A[i]].append(i)
lookup.add(A[i])
for j1, j2 in itertools.combinations(xrange(L), 2):
word[j1], word[j2] = word[j2], word[j1]
buckets["".join(word)].append(i)
word[j1], word[j2] = word[j2], word[j1]
for word in A: # Time: O(n * l^4)
for i1, i2 in itertools.combinations(buckets[word], 2):
union_find.union_set(i1, i2)
return union_find.size()
|
Solution
|
python
|
apache__airflow
|
providers/http/tests/unit/http/notifications/test_http.py
|
{
"start": 957,
"end": 3435
}
|
class ____:
def test_class_and_notifier_are_same(self):
assert send_http_notification is HttpNotifier
@mock.patch("airflow.providers.http.notifications.http.HttpHook")
def test_http_notifier(self, mock_http_hook):
notifier = HttpNotifier(
http_conn_id="test_conn_id",
endpoint="/testing",
method="POST",
json={"message": "testing"},
headers={"Content-Type": "application/json"},
)
notifier.notify({})
mock_http_hook.return_value.run.assert_called_once_with(
endpoint="/testing",
data=None,
headers={"Content-Type": "application/json"},
extra_options={},
json={"message": "testing"},
)
mock_http_hook.assert_called_once_with(method="POST", http_conn_id="test_conn_id")
@pytest.mark.asyncio
@mock.patch("airflow.providers.http.notifications.http.HttpAsyncHook")
@mock.patch("aiohttp.ClientSession")
async def test_async_http_notifier(self, mock_session, mock_http_async_hook):
mock_hook = mock_http_async_hook.return_value
mock_hook.run = mock.AsyncMock()
notifier = HttpNotifier(
http_conn_id="test_conn_id",
endpoint="/test",
method="POST",
json={"message": "test"},
)
await notifier.async_notify({})
mock_hook.run.assert_called_once_with(
session=mock_session.return_value.__aenter__.return_value,
endpoint="/test",
data=None,
json={"message": "test"},
headers=None,
extra_options={},
)
@mock.patch("airflow.providers.http.notifications.http.HttpHook")
def test_http_notifier_templated(self, mock_http_hook, create_dag_without_db):
notifier = HttpNotifier(
endpoint="/{{ dag.dag_id }}",
json={"dag_id": "{{ dag.dag_id }}", "user": "{{ username }}"},
)
notifier(
{
"dag": create_dag_without_db("test_http_notification_templated"),
"username": "test-user",
}
)
mock_http_hook.return_value.run.assert_called_once_with(
endpoint="/test_http_notification_templated",
data=None,
headers=None,
extra_options={},
json={"dag_id": "test_http_notification_templated", "user": "test-user"},
)
|
TestHttpNotifier
|
python
|
gevent__gevent
|
src/gevent/tests/test__socket_dns.py
|
{
"start": 21874,
"end": 21937
}
|
class ____(TestCase):
pass
add(Test1234, '1.2.3.4')
|
Test1234
|
python
|
doocs__leetcode
|
solution/3300-3399/3312.Sorted GCD Pair Queries/Solution.py
|
{
"start": 0,
"end": 463
}
|
class ____:
def gcdValues(self, nums: List[int], queries: List[int]) -> List[int]:
mx = max(nums)
cnt = Counter(nums)
cnt_g = [0] * (mx + 1)
for i in range(mx, 0, -1):
v = 0
for j in range(i, mx + 1, i):
v += cnt[j]
cnt_g[i] -= cnt_g[j]
cnt_g[i] += v * (v - 1) // 2
s = list(accumulate(cnt_g))
return [bisect_right(s, q) for q in queries]
|
Solution
|
python
|
pytorch__pytorch
|
test/dynamo/test_trace_rules.py
|
{
"start": 19361,
"end": 20243
}
|
class ____(torch._dynamo.test_case.TestCase):
@unittest.skipIf(
not torch.distributed.is_available(),
"need to import MLP module from distributed",
)
@skipIfWindows(
msg="AssertionError: False is not true : MLP did not survive skip files"
)
def test_module_survive_skip_files(self):
from torch.testing._internal.common_fsdp import MLP
model = MLP(3)
inp = torch.randn((2, 3))
frame_count_before = torch._dynamo.convert_frame.FRAME_COUNTER
model.compile(backend="eager")
model(inp)
frame_count_after = torch._dynamo.convert_frame.FRAME_COUNTER
self.assertTrue(
frame_count_after > frame_count_before, "MLP did not survive skip files"
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
TestModuleSurviveSkipFiles
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 10993,
"end": 11973
}
|
class ____(Widget):
"""A representation of ``st.chat_input``."""
_value: str | None
proto: ChatInputProto = field(repr=False)
placeholder: str
def __init__(self, proto: ChatInputProto, root: ElementTree) -> None:
super().__init__(proto, root)
self.type = "chat_input"
def set_value(self, v: str | None) -> ChatInput:
"""Set the value of the widget."""
self._value = v
return self
@property
def _widget_state(self) -> WidgetState:
ws = WidgetState()
ws.id = self.id
if self._value is not None:
ws.string_trigger_value.data = self._value
return ws
@property
def value(self) -> str | None:
"""The value of the widget. (str)""" # noqa: D400
if self._value:
return self._value
state = self.root.session_state
assert state
return state[TESTING_KEY][self.id] # type: ignore
@dataclass(repr=False)
|
ChatInput
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/streams.py
|
{
"start": 5793,
"end": 7343
}
|
class ____(FxTracebackAnnotateVariable):
"""This represents torch.cuda.StreamContext"""
@staticmethod
def create(
tx: "InstructionTranslator",
stream_to_enter: "StreamVariable",
**kwargs: dict[str, Any],
) -> "StreamContextVariable":
return StreamContextVariable(
stream_to_enter,
**kwargs,
)
def __init__(self, stream: Optional["StreamVariable"], **kwargs: Any) -> None:
self.stream = stream
super().__init__(
target_values={"stream": self.get_stream().user_object_index},
initial_values=None,
**kwargs,
)
def enter(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
# to stream, from stream is the order of the arguments
# we are entering the target, and leaving the initial stream
tx.symbolic_stream_state.enter_stream(self.get_stream())
return super().enter(tx)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
# to stream, from stream is the order of the arguments
# we are leaving the target, and entering the initial stream
tx.symbolic_stream_state.exit_stream()
return super().exit(tx, *args)
def supports_graph_breaks(self) -> bool:
return True
def get_stream(self) -> "StreamVariable":
assert self.stream, "Stream context should have a separate stream"
return self.stream
|
StreamContextVariable
|
python
|
tornadoweb__tornado
|
tornado/test/routing_test.py
|
{
"start": 1979,
"end": 2087
}
|
class ____(RequestHandler):
def post(self, path):
resources[path] = self.request.body
|
PostResource
|
python
|
pydantic__pydantic
|
tests/test_json_schema.py
|
{
"start": 92845,
"end": 92950
}
|
class ____(BaseModel):
class NestedModel(BaseModel):
b: float
nested: NestedModel
|
ModelTwo
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-box/llama_index/readers/box/BoxReaderTextExtraction/base.py
|
{
"start": 500,
"end": 3513
}
|
class ____(BoxReaderBase):
"""
A reader class for loading text content from Box files.
This class inherits from the `BaseReader` class and specializes in
extracting plain text content from Box files. It utilizes the provided
BoxClient object to interact with the Box API and retrieves the text
representation of the files.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
@classmethod
def class_name(cls) -> str:
return "BoxReaderTextExtraction"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
# def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
def load_data(
self,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
) -> List[Document]:
"""
Extracts text content from Box files and creates Document objects.
This method utilizes the Box API to retrieve the text representation
(if available) of the specified Box files. It then creates Document
objects containing the extracted text and file metadata.
Args:
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract text from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract text from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts text from sub-folders within the specified folder.
Defaults to False.
Returns:
List[Document]: A list of Document objects containing the extracted
text content and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get Box files details
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_text_representation(
box_client=self._box_client,
box_files=box_files,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.text_representation if file.text_representation else ""
docs.append(doc)
return docs
|
BoxReaderTextExtraction
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/patches.py
|
{
"start": 25070,
"end": 32379
}
|
class ____(Patch):
"""
A rectangle defined via an anchor point *xy* and its *width* and *height*.
The rectangle extends from ``xy[0]`` to ``xy[0] + width`` in x-direction
and from ``xy[1]`` to ``xy[1] + height`` in y-direction. ::
: +------------------+
: | |
: height |
: | |
: (xy)---- width -----+
One may picture *xy* as the bottom left corner, but which corner *xy* is
actually depends on the direction of the axis and the sign of *width*
and *height*; e.g. *xy* would be the bottom right corner if the x-axis
was inverted or if *width* was negative.
"""
def __str__(self):
pars = self._x0, self._y0, self._width, self._height, self.angle
fmt = "Rectangle(xy=(%g, %g), width=%g, height=%g, angle=%g)"
return fmt % pars
@_docstring.interpd
def __init__(self, xy, width, height, *,
angle=0.0, rotation_point='xy', **kwargs):
"""
Parameters
----------
xy : (float, float)
The anchor point.
width : float
Rectangle width.
height : float
Rectangle height.
angle : float, default: 0
Rotation in degrees anti-clockwise about the rotation point.
rotation_point : {'xy', 'center', (number, number)}, default: 'xy'
If ``'xy'``, rotate around the anchor point. If ``'center'`` rotate
around the center. If 2-tuple of number, rotate around this
coordinate.
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Patch` properties
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self._x0 = xy[0]
self._y0 = xy[1]
self._width = width
self._height = height
self.angle = float(angle)
self.rotation_point = rotation_point
# Required for RectangleSelector with axes aspect ratio != 1
# The patch is defined in data coordinates and when changing the
# selector with square modifier and not in data coordinates, we need
# to correct for the aspect ratio difference between the data and
# display coordinate systems. Its value is typically provide by
# Axes._get_aspect_ratio()
self._aspect_ratio_correction = 1.0
self._convert_units() # Validate the inputs.
def get_path(self):
"""Return the vertices of the rectangle."""
return Path.unit_rectangle()
def _convert_units(self):
"""Convert bounds of the rectangle."""
x0 = self.convert_xunits(self._x0)
y0 = self.convert_yunits(self._y0)
x1 = self.convert_xunits(self._x0 + self._width)
y1 = self.convert_yunits(self._y0 + self._height)
return x0, y0, x1, y1
def get_patch_transform(self):
# Note: This cannot be called until after this has been added to
# an Axes, otherwise unit conversion will fail. This makes it very
# important to call the accessor method and not directly access the
# transformation member variable.
bbox = self.get_bbox()
if self.rotation_point == 'center':
width, height = bbox.x1 - bbox.x0, bbox.y1 - bbox.y0
rotation_point = bbox.x0 + width / 2., bbox.y0 + height / 2.
elif self.rotation_point == 'xy':
rotation_point = bbox.x0, bbox.y0
else:
rotation_point = self.rotation_point
return transforms.BboxTransformTo(bbox) \
+ transforms.Affine2D() \
.translate(-rotation_point[0], -rotation_point[1]) \
.scale(1, self._aspect_ratio_correction) \
.rotate_deg(self.angle) \
.scale(1, 1 / self._aspect_ratio_correction) \
.translate(*rotation_point)
@property
def rotation_point(self):
"""The rotation point of the patch."""
return self._rotation_point
@rotation_point.setter
def rotation_point(self, value):
if value in ['center', 'xy'] or (
isinstance(value, tuple) and len(value) == 2 and
isinstance(value[0], Real) and isinstance(value[1], Real)
):
self._rotation_point = value
else:
raise ValueError("`rotation_point` must be one of "
"{'xy', 'center', (number, number)}.")
def get_x(self):
"""Return the left coordinate of the rectangle."""
return self._x0
def get_y(self):
"""Return the bottom coordinate of the rectangle."""
return self._y0
def get_xy(self):
"""Return the left and bottom coords of the rectangle as a tuple."""
return self._x0, self._y0
def get_corners(self):
"""
Return the corners of the rectangle, moving anti-clockwise from
(x0, y0).
"""
return self.get_patch_transform().transform(
[(0, 0), (1, 0), (1, 1), (0, 1)])
def get_center(self):
"""Return the centre of the rectangle."""
return self.get_patch_transform().transform((0.5, 0.5))
def get_width(self):
"""Return the width of the rectangle."""
return self._width
def get_height(self):
"""Return the height of the rectangle."""
return self._height
def get_angle(self):
"""Get the rotation angle in degrees."""
return self.angle
def set_x(self, x):
"""Set the left coordinate of the rectangle."""
self._x0 = x
self.stale = True
def set_y(self, y):
"""Set the bottom coordinate of the rectangle."""
self._y0 = y
self.stale = True
def set_angle(self, angle):
"""
Set the rotation angle in degrees.
The rotation is performed anti-clockwise around *xy*.
"""
self.angle = angle
self.stale = True
def set_xy(self, xy):
"""
Set the left and bottom coordinates of the rectangle.
Parameters
----------
xy : (float, float)
"""
self._x0, self._y0 = xy
self.stale = True
def set_width(self, w):
"""Set the width of the rectangle."""
self._width = w
self.stale = True
def set_height(self, h):
"""Set the height of the rectangle."""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle as *left*, *bottom*, *width*, *height*.
The values may be passed as separate parameters or as a tuple::
set_bounds(left, bottom, width, height)
set_bounds((left, bottom, width, height))
.. ACCEPTS: (left, bottom, width, height)
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x0 = l
self._y0 = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
"""Return the `.Bbox`."""
return transforms.Bbox.from_extents(*self._convert_units())
xy = property(get_xy, set_xy)
|
Rectangle
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/airflow_aux/test_cleanup_pods.py
|
{
"start": 14954,
"end": 16332
}
|
class ____:
"""Tests cleanup of service accounts."""
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/cleanup/cleanup-serviceaccount.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_default_automount_service_account_token(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
},
},
show_only=["templates/cleanup/cleanup-serviceaccount.yaml"],
)
assert jmespath.search("automountServiceAccountToken", docs[0]) is True
def test_overridden_automount_service_account_token(self):
docs = render_chart(
values={
"cleanup": {"enabled": True, "serviceAccount": {"automountServiceAccountToken": False}},
},
show_only=["templates/cleanup/cleanup-serviceaccount.yaml"],
)
assert jmespath.search("automountServiceAccountToken", docs[0]) is False
|
TestCleanupServiceAccount
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-chroma/destination_chroma/destination.py
|
{
"start": 844,
"end": 4559
}
|
class ____(Destination):
indexer: Indexer
embedder: Embedder
def _init_indexer(self, config: ConfigModel):
self.embedder = (
create_from_config(config.embedding, config.processing)
if config.embedding.mode != "no_embedding"
else NoEmbedder(config.embedding)
)
self.indexer = ChromaIndexer(config.indexing)
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
config_model = ConfigModel.parse_obj(config)
self._init_indexer(config_model)
writer = Writer(
config_model.processing, self.indexer, self.embedder, batch_size=BATCH_SIZE, omit_raw_text=config_model.omit_raw_text
)
yield from writer.write(configured_catalog, input_messages)
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
parsed_config = ConfigModel.parse_obj(config)
self._init_indexer(parsed_config)
checks = [self.embedder.check(), self.indexer.check(), DocumentProcessor.check_config(parsed_config.processing)]
errors = [error for error in checks if error is not None]
if len(errors) > 0:
return AirbyteConnectionStatus(status=Status.FAILED, message="\n".join(errors))
else:
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification:
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.com/integrations/destinations/chroma",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.overwrite, DestinationSyncMode.append, DestinationSyncMode.append_dedup],
connectionSpecification=ConfigModel.schema(),
)
|
DestinationChroma
|
python
|
gevent__gevent
|
src/gevent/tests/test__local.py
|
{
"start": 1904,
"end": 2008
}
|
class ____(local):
@classmethod
def a_classmethod(cls):
return cls
|
LocalWithClassMethod
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_fail.py
|
{
"start": 1455,
"end": 1531
}
|
class ____(Model):
model_config = ConfigDict(frozen=False)
|
InheritingModel
|
python
|
pytorch__pytorch
|
torch/_inductor/scheduler.py
|
{
"start": 2947,
"end": 12596
}
|
class ____:
"""
This class contains utility functions to decide if we should fuse reductions
reducing across different dimensions of the same input tensor.
"""
@staticmethod
def is_split_reduction(node: BaseSchedulerNode) -> bool:
return node.is_reduction() and all(
subnode.node._split_size is not None
for subnode in node.get_nodes()
if isinstance(subnode, SchedulerNode)
and subnode.is_reduction()
and isinstance(subnode.node, ComputedBuffer)
)
@classmethod
def get_numel_rnumel(cls, node: BaseSchedulerNode) -> tuple[sympy.Expr, sympy.Expr]:
if cls.is_split_reduction(node):
xnumel = None
rnumel = None
for subnode in node.get_nodes():
if not (
isinstance(subnode, SchedulerNode)
and subnode.is_reduction()
and isinstance(subnode.node, ComputedBuffer)
):
continue
assert subnode.node._original_ranges is not None
curxnumel = V.graph.sizevars.simplify(
sympy_product(subnode.node._original_ranges)
)
assert subnode.node._original_reduction_ranges is not None
currnumel = V.graph.sizevars.simplify(
sympy_product(subnode.node._original_reduction_ranges)
)
if xnumel is None:
xnumel = curxnumel
rnumel = currnumel
else:
assert V.graph.sizevars.statically_known_equals(
xnumel, curxnumel
), f"{xnumel} v.s. {curxnumel}"
assert V.graph.sizevars.statically_known_equals(
rnumel, currnumel
), f"{rnumel} v.s. {currnumel}"
assert xnumel is not None
return (xnumel, rnumel)
else:
return node.group[1] # type: ignore[return-value]
@classmethod
def has_mix_reduction_orders(
cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
g1 = cls.get_numel_rnumel(node1)
g2 = cls.get_numel_rnumel(node2)
if len(g1) != 2 or len(g2) != 2 or g1 == g2:
return False
return tuple(g1) == tuple(reversed(g2))
@classmethod
def _is_full_access(cls, buf: str, node: BaseSchedulerNode) -> bool:
"""
The access to 'buf' is not a broadcast access.
"""
found_dep = None
for dep in node.read_writes.reads:
if isinstance(dep, MemoryDep) and dep.name == buf:
found_dep = dep
break
if not found_dep:
return False
index = found_dep.index
var_ranges = node.read_writes.var_ranges
if not var_ranges:
assert isinstance(node, FusedSchedulerNode), f"{type(node)}"
var_ranges = node.snodes[0].read_writes.var_ranges
assert var_ranges
if not (OrderedSet(var_ranges) - OrderedSet(index.free_symbols)):
return True
# cases that happen after merging loops:
# MemoryDep('arg0_1', c0, {c0: 25165824})])
# var_ranges={d0: 32768, d1: 768}
if V.graph.sizevars.statically_known_equals(
sympy_product(found_dep.size), sympy_product(var_ranges.values())
):
return True
return False
@classmethod
def get_common_read(
cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> list[str]:
out = []
common_reads = node1.used_buffer_names() & node2.used_buffer_names()
for buf in common_reads:
if cls._is_full_access(buf, node1) and cls._is_full_access(buf, node2):
out.append(buf)
return out
@classmethod
def has_common_read(
cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
return len(cls.get_common_read(node1, node2)) > 0
# TODO add a cache
@classmethod
def can_fuse(cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:
"""
Check whether we can fuse two reductions with mix loop orders.
"""
if not config.triton.mix_order_reduction:
return False
if not node1.is_gpu() or not node2.is_gpu():
return False
device_type = node1.get_device().type # type: ignore[union-attr]
if (
device_type not in ("cuda", "xpu")
or get_current_backend(device_type) != "triton"
):
return False
if not node1.is_reduction() or not node2.is_reduction():
return False
# check for mix reduction orders
if not cls.has_mix_reduction_orders(node1, node2):
return False
# check common buffer accesses
common_reads = MixOrderReduction.get_common_read(node1, node2)
if len(common_reads) == 0:
return False
g1 = cls.get_numel_rnumel(node1)
nrow = sympy.Max(g1[0], g1[1])
ncol = sympy.Min(g1[0], g1[1])
# the fused version has worse perf than non-fused version for
# small workload. When a workload is small enough, data can be
# fully cached by L2
size_thres = 5 * 2**20
# Call evaluate_expr rather than statically_known_geq since nrow can
# have dynamic shape in real models.
# Don't use hint directly since hint can be non-representative.
if not V.graph.sizevars.evaluate_expr(sympy.Ge(nrow * ncol, size_thres)):
return False
# We require more more row than columns since
# 1, we prefer doing persistent reduction for each row
# 2, we will split the reduction across the rows
if not V.graph.sizevars.evaluate_expr(sympy.Ge(nrow, ncol * 2)):
return False
# When nrow is small, ncol should also be small (due to the check
# above). Thus the entire tensor should be well cached in L2.
# Mix order reduction is less beneficial.
if not V.graph.sizevars.evaluate_expr(sympy.Ge(nrow, 4096)):
return False
contiguous_node, other_node = (
(node1, node2)
if V.graph.sizevars.evaluate_expr(sympy.Eq(g1[1], ncol))
else (node2, node1)
)
# We previously only check the contiguous_node has contiguous
# access to common_reads. But that turns out to be not enough.
# The contiguous node may access a buffer that's node use by
# other_ndoe. If that ascess is non-contiugous, generating
# mix-order reduction can be inefficient especially when we
# force XBLOCK to be 1
# if not all(
# cls.is_contiguous_load(buf, contiguous_node) for buf in common_reads
# ):
# return False
if not all(
cls.is_contiguous_load(dep.name, contiguous_node)
for dep in contiguous_node.read_writes.reads
):
return False
# Make sure a persistent reduction will be generated
if any(
subnode.node.data.reduction_hint # type: ignore[union-attr]
not in (
ReductionHint.INNER,
ReductionHint.DEFAULT,
)
for subnode in contiguous_node.get_nodes()
if subnode.is_reduction()
):
return False
# rnumel so large that we will not generated persistent reduction
# We don't see real use cases with dynamic ncol. But if we do,
# we should call evaluete_expr here which adds guards.
if not V.graph.sizevars.statically_known_leq(ncol, 1024 * 16):
return False
# Other reduction types like max/min is not supported yet.
# There are no real use case as well.
out = all(
subnode.node.get_reduction_type() # type: ignore[union-attr]
in {
"sum",
"prod",
}
for subnode in other_node.get_nodes()
if subnode.is_reduction()
)
return out
@classmethod
def are_mix_order_reductions(
cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
return cls.can_fuse(node1, node2)
@classmethod
def is_contiguous_load(cls, buf: str, parent_node: BaseSchedulerNode) -> bool:
from torch._inductor.loop_body import MemoryUsageType
for node in parent_node.get_nodes():
assert isinstance(node, SchedulerNode)
loop_body = node._body
entries = loop_body.memory_usage[MemoryUsageType.LOAD]
index_names = [e.index_name for e in entries if e.buffer_name == buf]
if len(index_names) == 0:
continue
# there can be multiple index_names some times
for index_name in index_names:
index_expr = loop_body.indexing_exprs[index_name]
var_ranges = loop_body.var_ranges
# assumes the final symbol is for reduction
var_symbols = list(var_ranges.keys())
stride_vars = V.graph.sizevars.stride_vars(
index_expr,
var_symbols,
var_symbols,
)
# stride==0 means a broadcast
if not (stride_vars[-1] == 0 or stride_vars[-1] == 1):
return False
return True
@dataclasses.dataclass
|
MixOrderReduction
|
python
|
TheAlgorithms__Python
|
data_structures/queues/priority_queue_using_list.py
|
{
"start": 2636,
"end": 5717
}
|
class ____:
"""
Element Priority Queue is the same as Fixed Priority Queue except that the value of
the element itself is the priority. The rules for priorities are the same the as
Fixed Priority Queue.
>>> epq = ElementPriorityQueue()
>>> epq.enqueue(10)
>>> epq.enqueue(70)
>>> epq.enqueue(4)
>>> epq.enqueue(1)
>>> epq.enqueue(5)
>>> epq.enqueue(7)
>>> epq.enqueue(4)
>>> epq.enqueue(64)
>>> epq.enqueue(128)
>>> print(epq)
[10, 70, 4, 1, 5, 7, 4, 64, 128]
>>> epq.dequeue()
1
>>> epq.dequeue()
4
>>> epq.dequeue()
4
>>> epq.dequeue()
5
>>> epq.dequeue()
7
>>> epq.dequeue()
10
>>> print(epq)
[70, 64, 128]
>>> epq.dequeue()
64
>>> epq.dequeue()
70
>>> epq.dequeue()
128
>>> epq.dequeue()
Traceback (most recent call last):
...
data_structures.queues.priority_queue_using_list.UnderFlowError: The queue is empty
>>> print(epq)
[]
"""
def __init__(self):
self.queue = []
def enqueue(self, data: int) -> None:
"""
This function enters the element into the queue
If the queue is full an Exception is raised saying Over Flow!
"""
if len(self.queue) == 100:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(data)
def dequeue(self) -> int:
"""
Return the highest priority element in FIFO order.
If the queue is empty then an under flow exception is raised.
"""
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
data = min(self.queue)
self.queue.remove(data)
return data
def __str__(self) -> str:
"""
Prints all the elements within the Element Priority Queue
"""
return str(self.queue)
def fixed_priority_queue():
fpq = FixedPriorityQueue()
fpq.enqueue(0, 10)
fpq.enqueue(1, 70)
fpq.enqueue(0, 100)
fpq.enqueue(2, 1)
fpq.enqueue(2, 5)
fpq.enqueue(1, 7)
fpq.enqueue(2, 4)
fpq.enqueue(1, 64)
fpq.enqueue(0, 128)
print(fpq)
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq)
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
def element_priority_queue():
epq = ElementPriorityQueue()
epq.enqueue(10)
epq.enqueue(70)
epq.enqueue(100)
epq.enqueue(1)
epq.enqueue(5)
epq.enqueue(7)
epq.enqueue(4)
epq.enqueue(64)
epq.enqueue(128)
print(epq)
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq)
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
|
ElementPriorityQueue
|
python
|
doocs__leetcode
|
solution/2100-2199/2136.Earliest Possible Day of Full Bloom/Solution.py
|
{
"start": 0,
"end": 270
}
|
class ____:
def earliestFullBloom(self, plantTime: List[int], growTime: List[int]) -> int:
ans = t = 0
for pt, gt in sorted(zip(plantTime, growTime), key=lambda x: -x[1]):
t += pt
ans = max(ans, t + gt)
return ans
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/web/test_urls.py
|
{
"start": 79,
"end": 354
}
|
class ____(TestCase):
def test_response(self) -> None:
path = reverse("sentry-docs-redirect")
resp = self.client.get(path)
assert resp["Location"] == "https://docs.sentry.io/"
assert resp.status_code == 302, resp.status_code
|
DocsRedirectTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-balanced-substring-i.py
|
{
"start": 638,
"end": 1223
}
|
class ____(object):
def longestBalanced(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
for i in xrange(len(s)):
cnt = [0]*26
mx = unique = 0
for j in xrange(i, len(s)):
if cnt[ord(s[j])-ord('a')] == 0:
unique += 1
cnt[ord(s[j])-ord('a')] += 1
mx = max(mx, cnt[ord(s[j])-ord('a')])
if (j-i+1)%unique == 0 and (j-i+1)//unique == mx:
result = max(result, j-i+1)
return result
|
Solution2
|
python
|
openai__gym
|
tests/wrappers/test_nested_dict.py
|
{
"start": 227,
"end": 2987
}
|
class ____(gym.Env):
def __init__(self, observation_space, render_mode=None):
self.observation_space = observation_space
self.obs_keys = self.observation_space.spaces.keys()
self.action_space = Box(shape=(1,), low=-1, high=1, dtype=np.float32)
self.render_mode = render_mode
def render(self, mode="human"):
image_shape = (32, 32, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
super().reset(seed=seed)
observation = self.observation_space.sample()
return observation, {}
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
NESTED_DICT_TEST_CASES = (
(
Dict(
{
"key1": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
"key2": Dict(
{
"subkey1": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
"subkey2": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
}
),
(6,),
),
(
Dict(
{
"key1": Box(shape=(2, 3), low=-1, high=1, dtype=np.float32),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(9,),
),
(
Dict(
{
"key1": Tuple(
(
Box(shape=(2,), low=-1, high=1, dtype=np.float32),
Box(shape=(2,), low=-1, high=1, dtype=np.float32),
)
),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(7,),
),
(
Dict(
{
"key1": Tuple((Box(shape=(2,), low=-1, high=1, dtype=np.float32),)),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(5,),
),
(
Dict(
{
"key1": Tuple(
(Dict({"key9": Box(shape=(2,), low=-1, high=1, dtype=np.float32)}),)
),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(5,),
),
)
|
FakeEnvironment
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tracers/_streaming.py
|
{
"start": 250,
"end": 982
}
|
class ____(typing.Protocol[T]):
"""Types for streaming callback handlers.
This is a common mixin that the callback handlers
for both astream events and astream log inherit from.
The `tap_output_aiter` method is invoked in some contexts
to produce callbacks for intermediate results.
"""
def tap_output_aiter(
self, run_id: UUID, output: AsyncIterator[T]
) -> AsyncIterator[T]:
"""Used for internal astream_log and astream events implementations."""
def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
"""Used for internal astream_log and astream events implementations."""
__all__ = [
"_StreamingCallbackHandler",
]
|
_StreamingCallbackHandler
|
python
|
getsentry__sentry-python
|
sentry_sdk/envelope.py
|
{
"start": 6198,
"end": 10473
}
|
class ____:
def __init__(
self,
payload, # type: Union[bytes, str, PayloadRef]
headers=None, # type: Optional[Dict[str, Any]]
type=None, # type: Optional[str]
content_type=None, # type: Optional[str]
filename=None, # type: Optional[str]
):
if headers is not None:
headers = dict(headers)
elif headers is None:
headers = {}
self.headers = headers
if isinstance(payload, bytes):
payload = PayloadRef(bytes=payload)
elif isinstance(payload, str):
payload = PayloadRef(bytes=payload.encode("utf-8"))
else:
payload = payload
if filename is not None:
headers["filename"] = filename
if type is not None:
headers["type"] = type
if content_type is not None:
headers["content_type"] = content_type
elif "content_type" not in headers:
headers["content_type"] = payload.inferred_content_type
self.payload = payload
def __repr__(self):
# type: (...) -> str
return "<Item headers=%r payload=%r data_category=%r>" % (
self.headers,
self.payload,
self.data_category,
)
@property
def type(self):
# type: (...) -> Optional[str]
return self.headers.get("type")
@property
def data_category(self):
# type: (...) -> EventDataCategory
ty = self.headers.get("type")
if ty == "session" or ty == "sessions":
return "session"
elif ty == "attachment":
return "attachment"
elif ty == "transaction":
return "transaction"
elif ty == "event":
return "error"
elif ty == "log":
return "log_item"
elif ty == "trace_metric":
return "trace_metric"
elif ty == "client_report":
return "internal"
elif ty == "profile":
return "profile"
elif ty == "profile_chunk":
return "profile_chunk"
elif ty == "check_in":
return "monitor"
else:
return "default"
def get_bytes(self):
# type: (...) -> bytes
return self.payload.get_bytes()
def get_event(self):
# type: (...) -> Optional[Event]
"""
Returns an error event if there is one.
"""
if self.type == "event" and self.payload.json is not None:
return self.payload.json
return None
def get_transaction_event(self):
# type: (...) -> Optional[Event]
if self.type == "transaction" and self.payload.json is not None:
return self.payload.json
return None
def serialize_into(
self,
f, # type: Any
):
# type: (...) -> None
headers = dict(self.headers)
bytes = self.get_bytes()
headers["length"] = len(bytes)
f.write(json_dumps(headers))
f.write(b"\n")
f.write(bytes)
f.write(b"\n")
def serialize(self):
# type: (...) -> bytes
out = io.BytesIO()
self.serialize_into(out)
return out.getvalue()
@classmethod
def deserialize_from(
cls,
f, # type: Any
):
# type: (...) -> Optional[Item]
line = f.readline().rstrip()
if not line:
return None
headers = parse_json(line)
length = headers.get("length")
if length is not None:
payload = f.read(length)
f.readline()
else:
# if no length was specified we need to read up to the end of line
# and remove it (if it is present, i.e. not the very last char in an eof terminated envelope)
payload = f.readline().rstrip(b"\n")
if headers.get("type") in ("event", "transaction"):
rv = cls(headers=headers, payload=PayloadRef(json=parse_json(payload)))
else:
rv = cls(headers=headers, payload=payload)
return rv
@classmethod
def deserialize(
cls,
bytes, # type: bytes
):
# type: (...) -> Optional[Item]
return cls.deserialize_from(io.BytesIO(bytes))
|
Item
|
python
|
pypa__warehouse
|
tests/unit/accounts/test_forms.py
|
{
"start": 42126,
"end": 43337
}
|
class ____:
def test_validate(self):
user_service = pretend.stub(
find_userid=lambda userid: 1,
check_password=lambda userid, password, tags=None: True,
)
request = pretend.stub()
form = forms.ReAuthenticateForm(
formdata=MultiDict(
{
"username": "username",
"password": "mysupersecurepassword1!",
"next_route": pretend.stub(),
"next_route_matchdict": pretend.stub(),
"next_route_query": pretend.stub(),
}
),
request=request,
user_service=user_service,
)
assert form.user_service is user_service
assert form.__params__ == [
"username",
"password",
"next_route",
"next_route_matchdict",
"next_route_query",
]
assert isinstance(form.username, wtforms.StringField)
assert isinstance(form.next_route, wtforms.StringField)
assert isinstance(form.next_route_matchdict, wtforms.StringField)
assert form.validate(), str(form.errors)
|
TestReAuthenticateForm
|
python
|
scipy__scipy
|
scipy/integrate/tests/test_quadpack.py
|
{
"start": 3749,
"end": 4797
}
|
class ____:
def setup_method(self):
restype = ctypes.c_double
argtypes = (ctypes.c_int, ctypes.c_double)
for name in ['_multivariate_typical', '_multivariate_indefinite',
'_multivariate_sin']:
func = get_clib_test_routine(name, restype, *argtypes)
setattr(self, name, func)
def test_typical(self):
# 1) Typical function with two extra arguments:
assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
assert_quad(quad(self._multivariate_indefinite, 0, np.inf),
0.577215664901532860606512)
def test_threadsafety(self):
# Ensure multivariate ctypes are threadsafe
def threadsafety(y):
return y + quad(self._multivariate_sin, 0, 1)[0]
assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
@make_xp_test_case(quad)
|
TestMultivariateCtypesQuad
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-excellent-pairs.py
|
{
"start": 591,
"end": 1174
}
|
class ____(object):
def countExcellentPairs(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def popcount(x):
return bin(x)[2:].count('1')
sorted_cnts = sorted(popcount(x) for x in set(nums))
result = 0
left, right = 0, len(sorted_cnts)-1
while left <= right:
if sorted_cnts[left]+sorted_cnts[right] < k:
left += 1
else:
result += 1+2*((right-1)-left+1)
right -= 1
return result
|
Solution2
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/triggers/test_mwaa.py
|
{
"start": 5283,
"end": 7607
}
|
class ____:
def test_overwritten_conn_passed_to_hook(self):
OVERWRITTEN_CONN = "new-conn-id"
op = MwaaTaskCompletedTrigger(**TRIGGER_TASK_KWARGS, aws_conn_id=OVERWRITTEN_CONN)
assert op.hook().aws_conn_id == OVERWRITTEN_CONN
def test_no_conn_passed_to_hook(self):
DEFAULT_CONN = "aws_default"
op = MwaaTaskCompletedTrigger(**TRIGGER_TASK_KWARGS)
assert op.hook().aws_conn_id == DEFAULT_CONN
def test_init_fail(self):
with pytest.raises(ValueError, match=r".*success_states.*failure_states.*"):
MwaaTaskCompletedTrigger(
**TRIGGER_TASK_KWARGS, success_states=("a", "b"), failure_states=("b", "c")
)
def test_serialization(self):
success_states = ["a", "b"]
failure_states = ["c", "d"]
trigger = MwaaTaskCompletedTrigger(
**TRIGGER_TASK_KWARGS, success_states=success_states, failure_states=failure_states
)
classpath, kwargs = trigger.serialize()
assert classpath == BASE_TRIGGER_CLASSPATH + "MwaaTaskCompletedTrigger"
assert kwargs.get("external_env_name") == TRIGGER_TASK_KWARGS["external_env_name"]
assert kwargs.get("external_dag_id") == TRIGGER_TASK_KWARGS["external_dag_id"]
assert kwargs.get("external_dag_run_id") == TRIGGER_TASK_KWARGS["external_dag_run_id"]
assert kwargs.get("external_task_id") == TRIGGER_TASK_KWARGS["external_task_id"]
assert kwargs.get("success_states") == success_states
assert kwargs.get("failure_states") == failure_states
@pytest.mark.asyncio
@mock.patch.object(MwaaHook, "get_waiter")
@mock.patch.object(MwaaHook, "get_async_conn")
async def test_run_success(self, mock_async_conn, mock_get_waiter):
mock_async_conn.__aenter__.return_value = mock.MagicMock()
mock_get_waiter().wait = AsyncMock()
trigger = MwaaTaskCompletedTrigger(**TRIGGER_TASK_KWARGS)
generator = trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent(
{"status": "success", "task_id": TRIGGER_TASK_KWARGS["external_task_id"]}
)
assert_expected_waiter_type(mock_get_waiter, "mwaa_task_complete")
mock_get_waiter().wait.assert_called_once()
|
TestMwaaTaskCompletedTrigger
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B032.py
|
{
"start": 321,
"end": 385
}
|
class ____:
def test_self(self):
self.test: int
|
TestClass
|
python
|
pytorch__pytorch
|
torch/_export/non_strict_utils.py
|
{
"start": 35663,
"end": 42340
}
|
class ____(torch.overrides.TorchFunctionMode):
"""
1. Handles data-dependent errors raised by torch function calls in non-strict.
Any data-dependent error is due to some condition on unbacked symints
that cannot be resolved. A mechanical way of fixing the error is to use
a torch._check() call to assert either that condition or its negation.
The handler suggests these options as code and points to the location
of the torch function call that raised the error as part of the error
message shown to the user, who can then simply select and copy-paste
a suggested fix at that location.
NOTE: Not all data-dependent errors are raised by torch function calls.
In particular, conditions on unbacked symints can appear outside such
calls, and as such are not handled here.
2. Overrides torch functions that are known to cause problems in non-strict.
Certain Python features, such as indexing/slicing, cannot be intercepted
in non-strict. Likewise, certain legacy ops, such as distributed collectives,
may need to be mapped to other ops. When there is special handling in Dynamo
for such things, tracing can fail in non-strict (while succeeding in strict).
Fortunately, redirecting to other torch functions can often fix such issues.
3. Handles line-of-code logging for each torch function call in non-strict.
Usage: TORCHEXPORT_EXTENDED_DEBUG_CURRENT_LOC=1 TORCH_LOGS="+export" ...
"""
def _override(self, func, args, kwargs):
if torch.distributed.is_available():
from torch.distributed._functional_collectives import (
REDUCE_OP_TO_STR,
traceable_collective_remaps,
)
if func in traceable_collective_remaps:
# Redirect to a corresponding functional collective, following Dynamo.
# See torch/distributed/_functional_collectives.py for details.
# The following is an adaptation of CollectiveFunctionRewriteVariable.
mapped_func = traceable_collective_remaps[func]
signature = inspect.signature(func)
kwargs = dict(signature.bind(*args, **kwargs).arguments)
args = ()
if func in (
torch.distributed.all_reduce,
torch.distributed.reduce_scatter_tensor,
torch.distributed._reduce_scatter_base,
):
if "op" in kwargs:
kwargs["op"] = REDUCE_OP_TO_STR[kwargs["op"]]
return mapped_func, args, kwargs
if func is torch.tensor:
# Redirect to Python implementation of torch.tensor for data with symints.
# NOTE(avik): We don't unconditionally redirect to this implementation
# because it has some known incompletenesses, e.g., it doesn't support
# empty data. See https://github.com/pytorch/pytorch/issues/143216
if any(
isinstance(a, (torch.SymInt, torch.SymFloat, torch.SymBool))
for a in pytree.tree_flatten(args[0])[0]
):
return torch._refs.tensor, args, kwargs
if func.__name__ == "__getitem__" and isinstance(args[0], torch.Tensor):
def rewrite(dim, item):
# Redirect to torch.select for indexing.
if item is None:
return dim + 1, (torch.unsqueeze, [dim])
if isinstance(item, (int, torch.SymInt)):
return dim, (torch.select, [dim, item])
# Redirect to torch.ops.aten.slice for slicing.
if isinstance(item, slice):
step = item.step or 1
if item.start is None and item.stop is None and step == 1:
# no-op
return dim + 1, (lambda t: t, [])
return dim + 1, (
torch.ops.aten.slice,
[dim, item.start, item.stop, step],
)
# Otherwise do nothing.
items = list(args[1]) if isinstance(args[1], tuple) else [args[1]]
has_symint = False
index_ellipsis = None
t = args[0]
n_none_slices = t.ndim + 1
for i, item in enumerate(items):
if isinstance(item, torch.SymInt) or (
isinstance(item, slice)
and any(
isinstance(s, torch.SymInt)
for s in (item.start, item.stop, item.step)
)
):
has_symint = True
if item is Ellipsis:
index_ellipsis = i
if item is not None:
n_none_slices -= 1
# only rewrite when there are symints
if has_symint:
if index_ellipsis is not None:
none_slices = [slice(None)] * n_none_slices
items[index_ellipsis : index_ellipsis + 1] = none_slices
dim = 0
# Sequence rewrites.
sequence = []
for item in items:
if (r := rewrite(dim, item)) is None:
return func, args, kwargs
dim, call_spec = r
sequence.append(call_spec)
def run():
# Run sequence.
# pyrefly: ignore [index-error]
t = args[0]
for _method, _args in sequence:
t = _method(t, *_args)
return t
return run, [], {}
return func, args, kwargs
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if torch.compiler.is_dynamo_compiling():
return func(*args, **kwargs)
if log.isEnabledFor(logging.DEBUG) and config.extended_debug_current_loc:
frame = _find_user_code_frame()
if frame is not None:
log.debug(
"%s called at %s:%s in %s",
func.__qualname__,
frame.f_code.co_filename,
frame.f_lineno,
frame.f_code.co_name,
)
func, args, kwargs = self._override(func, args, kwargs)
try:
return func(*args, **kwargs)
except GuardOnDataDependentSymNode as e:
_suggest_fixes_for_data_dependent_error_non_strict(e)
raise
|
_NonStrictTorchFunctionHandler
|
python
|
langchain-ai__langchain
|
libs/langchain/tests/unit_tests/callbacks/test_file.py
|
{
"start": 240,
"end": 2094
}
|
class ____(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
@override
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
return {"bar": "bar"}
def strip_ansi(text: str) -> str:
"""Removes ANSI escape sequences from a string.
Args:
text: The string potentially containing ANSI codes.
"""
ansi_escape = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]")
return ansi_escape.sub("", text)
def test_filecallback(tmp_path: pathlib.Path) -> None:
"""Test the file callback handler."""
log1 = tmp_path / "output.log"
handler = FileCallbackHandler(str(log1))
chain_test = FakeChain(callbacks=[handler])
chain_test.invoke({"foo": "bar"})
handler.close()
# Assert the output is as expected
assert "Entering new FakeChain chain" in strip_ansi(log1.read_text())
# Test using a callback manager
log2 = tmp_path / "output2.log"
with FileCallbackHandler(str(log2)) as handler_cm:
chain_test = FakeChain(callbacks=[handler_cm])
chain_test.invoke({"foo": "bar"})
assert "Entering new FakeChain chain" in strip_ansi(log2.read_text())
# Test passing via invoke callbacks
log3 = tmp_path / "output3.log"
with FileCallbackHandler(str(log3)) as handler_cm:
chain_test.invoke({"foo": "bar"}, {"callbacks": [handler_cm]})
assert "Entering new FakeChain chain" in strip_ansi(log3.read_text())
|
FakeChain
|
python
|
getsentry__sentry
|
tests/sentry/sentry_apps/api/parsers/test_schema.py
|
{
"start": 169,
"end": 7060
}
|
class ____(unittest.TestCase):
def setUp(self) -> None:
self.schema = {
"elements": [
{
"type": "issue-link",
"link": {
"uri": "/sentry/issues/link",
"required_fields": [
{
"type": "select",
"name": "assignee",
"label": "Assignee",
"uri": "/sentry/members",
}
],
},
"create": {
"uri": "/sentry/issues/create",
"required_fields": [
{"type": "text", "name": "title", "label": "Title"},
{"type": "text", "name": "summary", "label": "Summary"},
],
"optional_fields": [
{
"type": "select",
"name": "points",
"label": "Points",
"options": [
["1", "1"],
["2", "2"],
["3", "3"],
["5", "5"],
["8", "8"],
],
},
{
"type": "select",
"name": "assignee",
"label": "Assignee",
"uri": "/sentry/members",
},
],
},
},
{
"type": "alert-rule-action",
"title": "Create task",
"settings": {
"type": "alert-rule-settings",
"uri": "/sentry/alert-rule",
"required_fields": [
{"type": "text", "name": "channel", "label": "Channel"},
{
"type": "select",
"name": "send_email",
"label": "Send Email?",
"options": [["Yes", "yes"], ["No", "no"]],
},
],
},
},
{
"type": "issue-media",
"title": "Feature Demo",
"elements": [{"type": "video", "url": "/sentry/issues/video"}],
},
{"type": "stacktrace-link", "uri": "/sentry/issue"},
]
}
def test_valid_schema_with_options(self) -> None:
validate_ui_element_schema(self.schema)
@invalid_schema_with_error_message("'elements' is a required property")
def test_invalid_schema_elements_missing(self) -> None:
schema = {"type": "nothing"}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message("'elements' should be an array of objects")
def test_invalid_schema_elements_not_array(self) -> None:
schema = {"elements": {"type": "issue-link"}}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message("Each element needs a 'type' field")
def test_invalid_schema_type_missing(self) -> None:
schema = {"elements": [{"key": "issue-link"}]}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message(
"Element has type 'other'. Type must be one of the following: ['issue-link', 'alert-rule-action', 'issue-media', 'stacktrace-link']"
)
def test_invalid_schema_type_invalid(self) -> None:
schema = {"elements": [{"type": "other"}]}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message(
"'uri' is a required property for element of type 'stacktrace-link'"
)
def test_invalid_schema_element_missing_uri(self) -> None:
schema = {
"elements": [{"url": "/stacktrace/github/getsentry/sentry", "type": "stacktrace-link"}]
}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message("Multiple elements of type: stacktrace-link")
def test_multiple_of_same_element_type(self) -> None:
schema = {
"elements": [
{"uri": "/stacktrace/github/getsentry/sentry", "type": "stacktrace-link"},
{"uri": "/stacktrace/github/getsentry/sentry", "type": "stacktrace-link"},
]
}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message(
"Elements of type ['text', 'textarea'] may only have a default value of the following: ['issue.title', 'issue.description'], but issue.something was found."
)
def test_invalid_textarea_default_value(self) -> None:
schema = {
"elements": [
{
"type": "alert-rule-action",
"title": "Mudpuppy",
"settings": {
"type": "alert-rule-settings",
"uri": "/alert-rule-action",
"required_fields": [
{
"label": "Team",
"type": "textarea",
"name": "teamId",
"default": "issue.something",
}
],
},
}
]
}
validate_ui_element_schema(schema)
@invalid_schema_with_error_message(
"Elements of type ['text', 'textarea'] may only have a default value of the following: ['issue.title', 'issue.description'], but issue.someone was found."
)
def test_invalid_text_default_value(self) -> None:
schema = {
"elements": [
{
"type": "alert-rule-action",
"title": "Tater Tots",
"settings": {
"type": "alert-rule-settings",
"uri": "/alert-rule-action",
"optional_fields": [
{
"label": "Team",
"type": "text",
"name": "teamId",
"default": "issue.someone",
}
],
},
}
]
}
validate_ui_element_schema(schema)
|
TestSchemaValidation
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 55456,
"end": 55831
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("starrable_id", "client_mutation_id")
starrable_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="starrableId"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
AddStarInput
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/collections.py
|
{
"start": 62840,
"end": 64925
}
|
class ____(_CollectionWithSizes):
"""A collection of n-sided regular polygons."""
_path_generator = mpath.Path.unit_regular_polygon
_factor = np.pi ** (-1/2)
def __init__(self,
numsides,
*,
rotation=0,
sizes=(1,),
**kwargs):
"""
Parameters
----------
numsides : int
The number of sides of the polygon.
rotation : float
The rotation of the polygon in radians.
sizes : tuple of float
The area of the circle circumscribing the polygon in points^2.
**kwargs
Forwarded to `.Collection`.
Examples
--------
See :doc:`/gallery/event_handling/lasso_demo` for a complete example::
offsets = np.random.rand(20, 2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors=facecolors,
edgecolors=("black",),
linewidths=(1,),
offsets=offsets,
offset_transform=ax.transData,
)
"""
super().__init__(**kwargs)
self.set_sizes(sizes)
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
@artist.allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.get_figure(root=True).dpi)
self._transforms = [
transforms.Affine2D(x).rotate(-self._rotation).get_matrix()
for x in self._transforms
]
# Explicitly not super().draw, because set_sizes must be called before
# updating self._transforms.
Collection.draw(self, renderer)
|
RegularPolyCollection
|
python
|
huggingface__transformers
|
src/transformers/generation/logits_process.py
|
{
"start": 150468,
"end": 153487
}
|
class ____(LogitsProcessor):
r"""Specialized processor that ensures certain properties around EOS sampling:
1. Only channel 0 can generate EOS
2. If channel 0 has EOS with highest logit, it will be the only candidate
3. If channel 0 has EOS not with highest logit, it will be suppressed
2. and 3. are especially important in contexts where we allow sampling to guarantee the
respective tokens to be (not) sampled.
<Tip warning={true}>
This logits processor is exclusively compatible with
[Dia](https://huggingface.co/docs/transformers/en/model_doc/dia).
</Tip>
Args:
num_channels (`int`):
Number of audio codebooks. Simplifies access to the first channel on the logits.
eos_token_id (`int`):
The id of *end-of-sequence* token.
"""
def __init__(self, num_channels: int, eos_token_id: int):
if num_channels < 1:
raise ValueError(f"Audio codebooks need at least one channel, but found {num_channels} channels.")
if eos_token_id < 1:
raise ValueError(f"Expected `eos_token_id` to be a positive integer, found {eos_token_id} instead.")
self.num_channels = num_channels
self.eos_id = eos_token_id
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Reshape for easier channel indexing [B, C, V]
scores = scores.reshape(-1, self.num_channels, scores.shape[-1])
# EOS filter
# 1. Condition: Only the first channel can generate the EOS token
# Side condition of disabling generation of special tokens (e.g. audio pad, bos, ...)
# (Assumes them to be greater than audio eos token position)
scores[:, 1:, self.eos_id :] = torch.full_like(
scores[:, 1:, self.eos_id :],
fill_value=-float("inf"),
)
scores[:, 0, self.eos_id + 1 :] = torch.full_like(
scores[:, 0, self.eos_id + 1 :],
fill_value=-float("inf"),
)
# 2+3 Conditions: Force/Suppress EOS if (not) highest logit
# Reshape back to original shape
scores = scores.view(-1, scores.shape[-1])
# Sample highest tokens
top_logit_indices = torch.argmax(scores, dim=-1)
# 2. Force EOS
eos_highest_mask = top_logit_indices == self.eos_id
mask_eos_highest = torch.zeros_like(scores, dtype=torch.bool)
mask_eos_highest[eos_highest_mask, : self.eos_id] = True
scores = scores.masked_fill(mask_eos_highest, -float("inf"))
# 3. Suppress EOS
eos_not_highest_mask = top_logit_indices != self.eos_id
mask_eos_unless_highest = torch.zeros_like(scores, dtype=torch.bool)
mask_eos_unless_highest[eos_not_highest_mask, self.eos_id] = True
scores = scores.masked_fill(mask_eos_unless_highest, -float("inf"))
return scores
|
DiaEOSChannelFilterLogitsProcessor
|
python
|
encode__django-rest-framework
|
tests/test_model_serializer.py
|
{
"start": 44771,
"end": 45234
}
|
class ____(TestCase):
def test_model_field(self):
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneSourceTestModel
fields = ('target',)
target = OneToOneTargetTestModel(id=1, text='abc')
source = OneToOneSourceTestModel(target=target)
serializer = ExampleSerializer(source)
self.assertEqual(serializer.data, {'target': 1})
|
TestModelFieldValues
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/class_attribute.py
|
{
"start": 319,
"end": 1636
}
|
class ____:
a = ""
b = ""
def __init__(self, c):
A.b = _test_source()
self.c = c
self.d = _test_source()
def sink_a(self):
_test_sink(A.a)
def sink_b(self):
# TODO(T145247918): False negative, request from seceng to
# find this issue even without an explicit A().sink_b()
_test_sink(A.b)
def sink_c(self):
_test_sink(self.c)
def sink_d(self):
# TODO(T145247918): False negative, request from seceng to
# find this issue even without an explicit A().sink_d()
_test_sink(self.d)
def class_attribute_A_a_source():
A.a = _test_source()
def class_attribute_A_a_sink():
_test_sink(A.a)
def class_attribute_A_a_flow():
# TODO(T145247918): False negative
class_attribute_A_a_source()
class_attribute_A_a_sink()
def class_attribute_A_a_no_flow():
class_attribute_A_a_sink()
class_attribute_A_a_source()
def class_attribute_A_b_sink():
_test_sink(A.b)
def class_attribute_A_b_flow1():
# TODO(T145247918): False negative
A()
class_attribute_A_b_sink()
def class_attribute_A_b_flow2():
# TODO(T145247918): False negative
A().sink_b()
def instance_attribute_A_c_no_flow():
A().sink_c()
def instance_attribute_A_d_flow():
A().sink_d()
|
A
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-trello/unit_tests/test_components.py
|
{
"start": 132,
"end": 2717
}
|
class ____(Stream):
def __init__(self, records):
self.records = records
def primary_key(self):
return
def read_records(self, sync_mode):
return self.records
# test cases as a list of tuples (boards_records, organizations_records, expected_board_ids)
test_cases = [
(
# test same ids in both boards and organizations
[{"id": "b11111111111111111111111", "name": "board_1"}, {"id": "b22222222222222222222222", "name": "board_2"}],
[{"id": "org111111111111111111111", "idBoards": ["b11111111111111111111111", "b22222222222222222222222"]}],
["b11111111111111111111111", "b22222222222222222222222"],
),
(
# test one different id in organizations
[{"id": "b11111111111111111111111", "name": "board_1"}, {"id": "b22222222222222222222222", "name": "board_2"}],
[{"id": "org111111111111111111111", "idBoards": ["b11111111111111111111111", "b33333333333333333333333"]}],
["b11111111111111111111111", "b22222222222222222222222", "b33333333333333333333333"],
),
(
# test different ids in multiple boards and organizations
[{"id": "b11111111111111111111111", "name": "board_1"}, {"id": "b22222222222222222222222", "name": "board_2"}],
[
{"id": "org111111111111111111111", "idBoards": ["b11111111111111111111111", "b33333333333333333333333"]},
{"id": "org222222222222222222222", "idBoards": ["b00000000000000000000000", "b44444444444444444444444"]},
],
[
"b11111111111111111111111",
"b22222222222222222222222",
"b33333333333333333333333",
"b00000000000000000000000",
"b44444444444444444444444",
],
),
(
# test empty boards and organizations
[],
[],
[],
),
]
@pytest.mark.parametrize("boards_records, organizations_records, expected_board_ids", test_cases)
def test_read_all_boards(components_module, boards_records, organizations_records, expected_board_ids):
OrderIdsPartitionRouter = components_module.OrderIdsPartitionRouter
# Set up mock streams with provided records
partition_router = OrderIdsPartitionRouter(parent_stream_configs=[None], config=None, parameters=None)
boards_stream = MockStream(records=boards_records)
organizations_stream = MockStream(records=organizations_records)
# Call the function and check the result
board_ids = list(partition_router.read_all_boards(boards_stream, organizations_stream))
assert board_ids == expected_board_ids
|
MockStream
|
python
|
doocs__leetcode
|
solution/1100-1199/1177.Can Make Palindrome from Substring/Solution.py
|
{
"start": 0,
"end": 453
}
|
class ____:
def canMakePaliQueries(self, s: str, queries: List[List[int]]) -> List[bool]:
n = len(s)
ss = [[0] * 26 for _ in range(n + 1)]
for i, c in enumerate(s, 1):
ss[i] = ss[i - 1][:]
ss[i][ord(c) - ord("a")] += 1
ans = []
for l, r, k in queries:
cnt = sum((ss[r + 1][j] - ss[l][j]) & 1 for j in range(26))
ans.append(cnt // 2 <= k)
return ans
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/snapshot_test.py
|
{
"start": 42082,
"end": 49557
}
|
class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_snapshot_dataset(self,
num_threads=1,
repeat=False,
pending_snapshot_expiry_seconds=-1,
shard_size_bytes=None):
def ds_fn():
self.snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
if not os.path.exists(self.snapshot_dir):
os.mkdir(self.snapshot_dir)
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(
snapshot.legacy_snapshot(
self.snapshot_dir,
num_writer_threads=num_threads,
writer_buffer_size=2 * num_threads,
num_reader_threads=num_threads,
reader_buffer_size=2 * num_threads,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
shard_size_bytes=shard_size_bytes))
if repeat:
dataset = dataset.repeat(2)
# Turn off `inject_prefetch` optimization. Otherwise, prefetched elements
# are saved and restored in snapshots while tests assume that there is no
# elements prefetched.
options = options_lib.Options()
options.experimental_optimization.inject_prefetch = False
dataset = dataset.with_options(options)
return dataset
return ds_fn
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testSnapshotBeforeEpochEnd(self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
outputs = self.gen_outputs(ds_fn, [], 100, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(100))
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointBeforeOneEpochThenRunFewStepsSmallShardMultiThread(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
shard_size_bytes=100)
outputs = []
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(ds_fn)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
start = 0
end = 100
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
self._save(sess, saver)
start = 100
end = 400
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
self.assertSequenceEqual(outputs, range(400))
outputs = outputs[:100]
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
fp_dir_list = os.listdir(self.snapshot_dir)
self.assertLen(list(fp_dir_list), 2)
for d in fp_dir_list:
if not d.endswith("-graph.pbtxt"):
fp_dir = os.path.join(self.snapshot_dir, d)
run_dir_list = os.listdir(fp_dir)
self.assertLen(list(run_dir_list), 2)
for e in run_dir_list:
if e != "snapshot.metadata":
run_dir = os.path.join(fp_dir, e)
self.assertLen(list(os.listdir(run_dir)), 258)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointBeforeOneEpochThenRunFewSteps(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 200 entries from iterator but save checkpoint after producing
# 100.
outputs = self.gen_outputs(
ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(200))
outputs = outputs[:100]
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointBeforeOneEpochThenRunFewStepsMultipleThreads(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
num_threads=2,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 200 entries from iterator but save checkpoint after producing
# 100.
outputs = self.gen_outputs(
ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(200))
outputs = outputs[:100]
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointAfterOneEpoch(self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
repeat=True,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 1100 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 1100, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(1000)) + list(range(100)))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
t = self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
outputs.extend(t)
self.assertSequenceEqual(
outputs,
list(range(1000)) + list(range(100)) + list(range(900)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointAfterOneEpochThenRunFewSteps(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
repeat=True,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 200 entries from iterator but save checkpoint after producing
# 100.
outputs = self.gen_outputs(
ds_fn, [1100],
1200,
verify_exhausted=False,
save_checkpoint_at_end=False)
self.assertSequenceEqual(
outputs,
list(range(1000)) + list(range(100)) + list(range(100)))
outputs = outputs[:1100]
t = self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
outputs.extend(t)
self.assertSequenceEqual(
outputs, (list(range(1000)) + list(range(100)) + list(range(900))))
if __name__ == "__main__":
test.main()
|
LegacySnapshotCheckpointTest
|
python
|
geekcomputers__Python
|
bank_managment_system/backend.py
|
{
"start": 27,
"end": 5258
}
|
class ____:
def __init__(self, db_name="bankmanaging.db"):
self.db_path = os.path.join(os.path.dirname(__file__), db_name)
self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
self.cur = self.conn.cursor()
self._setup_tables()
self.acc_no = self._get_last_acc_no() + 1
def _setup_tables(self):
self.cur.execute("""
CREATE TABLE IF NOT EXISTS bank (
acc_no INTEGER PRIMARY KEY,
name TEXT,
age INTEGER,
address TEXT,
balance INTEGER,
account_type TEXT,
mobile_number TEXT
)
""")
self.cur.execute("""
CREATE TABLE IF NOT EXISTS staff (
name TEXT,
pass TEXT,
salary INTEGER,
position TEXT
)
""")
self.cur.execute("CREATE TABLE IF NOT EXISTS admin (name TEXT, pass TEXT)")
self.cur.execute("SELECT COUNT(*) FROM admin")
if self.cur.fetchone()[0] == 0:
self.cur.execute("INSERT INTO admin VALUES (?, ?)", ("admin", "admin123"))
self.conn.commit()
def _get_last_acc_no(self):
self.cur.execute("SELECT MAX(acc_no) FROM bank")
last = self.cur.fetchone()[0]
return last if last else 0
# ----------------- Admin -----------------
def check_admin(self, name, password):
self.cur.execute(
"SELECT 1 FROM admin WHERE name=? AND pass=?", (name, password)
)
return self.cur.fetchone() is not None
# ----------------- Staff -----------------
def create_employee(self, name, password, salary, position):
self.cur.execute(
"INSERT INTO staff VALUES (?, ?, ?, ?)", (name, password, salary, position)
)
self.conn.commit()
def check_employee(self, name, password):
self.cur.execute(
"SELECT 1 FROM staff WHERE name=? AND pass=?", (name, password)
)
return self.cur.fetchone() is not None
def show_employees(self):
self.cur.execute("SELECT name, salary, position FROM staff")
return self.cur.fetchall()
def update_employee(self, field, new_value, name):
if field not in {"name", "pass", "salary", "position"}:
raise ValueError("Invalid employee field")
self.cur.execute(f"UPDATE staff SET {field}=? WHERE name=?", (new_value, name))
self.conn.commit()
def check_name_in_staff(self, name):
self.cur.execute("SELECT 1 FROM staff WHERE name=?", (name,))
return self.cur.fetchone() is not None
# ----------------- Customer -----------------
def create_customer(self, name, age, address, balance, acc_type, mobile_number):
acc_no = self.acc_no
self.cur.execute(
"INSERT INTO bank VALUES (?, ?, ?, ?, ?, ?, ?)",
(acc_no, name, age, address, balance, acc_type, mobile_number),
)
self.conn.commit()
self.acc_no += 1
return acc_no
def check_acc_no(self, acc_no):
self.cur.execute("SELECT 1 FROM bank WHERE acc_no=?", (acc_no,))
return self.cur.fetchone() is not None
def get_details(self, acc_no):
self.cur.execute("SELECT * FROM bank WHERE acc_no=?", (acc_no,))
return self.cur.fetchone()
def get_detail(self, acc_no):
self.cur.execute("SELECT name, balance FROM bank WHERE acc_no=?", (acc_no,))
return self.cur.fetchone()
def update_customer(self, field, new_value, acc_no):
if field not in {"name", "age", "address", "mobile_number", "account_type"}:
raise ValueError("Invalid customer field")
self.cur.execute(
f"UPDATE bank SET {field}=? WHERE acc_no=?", (new_value, acc_no)
)
self.conn.commit()
def update_balance(self, amount, acc_no):
self.cur.execute(
"UPDATE bank SET balance = balance + ? WHERE acc_no=?", (amount, acc_no)
)
self.conn.commit()
def deduct_balance(self, amount, acc_no):
self.cur.execute("SELECT balance FROM bank WHERE acc_no=?", (acc_no,))
bal = self.cur.fetchone()
if bal and bal[0] >= amount:
self.cur.execute(
"UPDATE bank SET balance=balance-? WHERE acc_no=?", (amount, acc_no)
)
self.conn.commit()
return True
return False
def check_balance(self, acc_no):
self.cur.execute("SELECT balance FROM bank WHERE acc_no=?", (acc_no,))
bal = self.cur.fetchone()
return bal[0] if bal else 0
def list_all_customers(self):
self.cur.execute("SELECT * FROM bank")
return self.cur.fetchall()
def delete_acc(self, acc_no):
self.cur.execute("DELETE FROM bank WHERE acc_no=?", (acc_no,))
self.conn.commit()
# ----------------- Stats -----------------
def all_money(self):
self.cur.execute("SELECT SUM(balance) FROM bank")
total = self.cur.fetchone()[0]
return total if total else 0
# ----------------- Cleanup -----------------
def close(self):
self.conn.close()
|
DatabaseManager
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_product_dimension_performance_report.py
|
{
"start": 27895,
"end": 37016
}
|
class ____(TestBaseProductDimensionPerformanceReport):
stream_name = "product_dimension_performance_report_monthly"
report_file = "product_dimension_performance_report_monthly"
incremental_report_file = "product_dimension_performance_report_monthly_incremental"
incremental_report_file_with_records_further_cursor = (
"product_dimension_performance_report_monthly_incremental_with_records_further_cursor"
)
report_file_with_records_further_start_date = "product_dimension_performance_report_monthly_with_records_further_start_date"
records_number = 8
state_file = "product_dimension_performance_report_monthly_state"
state_file_legacy = "product_dimension_performance_report_monthly_state"
def mock_report_apis(self):
super().mock_report_apis()
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
|
TestProductDimensionPerformanceReportMonthlyStream
|
python
|
scrapy__scrapy
|
scrapy/extensions/httpcache.py
|
{
"start": 1792,
"end": 9737
}
|
class ____:
MAXAGE = 3600 * 24 * 365 # one year
def __init__(self, settings: BaseSettings):
self.always_store: bool = settings.getbool("HTTPCACHE_ALWAYS_STORE")
self.ignore_schemes: list[str] = settings.getlist("HTTPCACHE_IGNORE_SCHEMES")
self._cc_parsed: WeakKeyDictionary[
Request | Response, dict[bytes, bytes | None]
] = WeakKeyDictionary()
self.ignore_response_cache_controls: list[bytes] = [
to_bytes(cc)
for cc in settings.getlist("HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS")
]
def _parse_cachecontrol(self, r: Request | Response) -> dict[bytes, bytes | None]:
if r not in self._cc_parsed:
cch = r.headers.get(b"Cache-Control", b"")
assert cch is not None
parsed = parse_cachecontrol(cch)
if isinstance(r, Response):
for key in self.ignore_response_cache_controls:
parsed.pop(key, None)
self._cc_parsed[r] = parsed
return self._cc_parsed[r]
def should_cache_request(self, request: Request) -> bool:
if urlparse_cached(request).scheme in self.ignore_schemes:
return False
cc = self._parse_cachecontrol(request)
# obey user-agent directive "Cache-Control: no-store"
return b"no-store" not in cc
def should_cache_response(self, response: Response, request: Request) -> bool:
# What is cacheable - https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.1
# Response cacheability - https://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
# Status code 206 is not included because cache can not deal with partial contents
cc = self._parse_cachecontrol(response)
# obey directive "Cache-Control: no-store"
if b"no-store" in cc:
return False
# Never cache 304 (Not Modified) responses
if response.status == 304:
return False
# Cache unconditionally if configured to do so
if self.always_store:
return True
# Any hint on response expiration is good
if b"max-age" in cc or b"Expires" in response.headers:
return True
# Firefox fallbacks this statuses to one year expiration if none is set
if response.status in (300, 301, 308):
return True
# Other statuses without expiration requires at least one validator
if response.status in (200, 203, 401):
return b"Last-Modified" in response.headers or b"ETag" in response.headers
# Any other is probably not eligible for caching
# Makes no sense to cache responses that does not contain expiration
# info and can not be revalidated
return False
def is_cached_response_fresh(
self, cachedresponse: Response, request: Request
) -> bool:
cc = self._parse_cachecontrol(cachedresponse)
ccreq = self._parse_cachecontrol(request)
if b"no-cache" in cc or b"no-cache" in ccreq:
return False
now = time()
freshnesslifetime = self._compute_freshness_lifetime(
cachedresponse, request, now
)
currentage = self._compute_current_age(cachedresponse, request, now)
reqmaxage = self._get_max_age(ccreq)
if reqmaxage is not None:
freshnesslifetime = min(freshnesslifetime, reqmaxage)
if currentage < freshnesslifetime:
return True
if b"max-stale" in ccreq and b"must-revalidate" not in cc:
# From RFC2616: "Indicates that the client is willing to
# accept a response that has exceeded its expiration time.
# If max-stale is assigned a value, then the client is
# willing to accept a response that has exceeded its
# expiration time by no more than the specified number of
# seconds. If no value is assigned to max-stale, then the
# client is willing to accept a stale response of any age."
staleage = ccreq[b"max-stale"]
if staleage is None:
return True
try:
if currentage < freshnesslifetime + max(0, int(staleage)):
return True
except ValueError:
pass
# Cached response is stale, try to set validators if any
self._set_conditional_validators(request, cachedresponse)
return False
def is_cached_response_valid(
self, cachedresponse: Response, response: Response, request: Request
) -> bool:
# Use the cached response if the new response is a server error,
# as long as the old response didn't specify must-revalidate.
if response.status >= 500:
cc = self._parse_cachecontrol(cachedresponse)
if b"must-revalidate" not in cc:
return True
# Use the cached response if the server says it hasn't changed.
return response.status == 304
def _set_conditional_validators(
self, request: Request, cachedresponse: Response
) -> None:
if b"Last-Modified" in cachedresponse.headers:
request.headers[b"If-Modified-Since"] = cachedresponse.headers[
b"Last-Modified"
]
if b"ETag" in cachedresponse.headers:
request.headers[b"If-None-Match"] = cachedresponse.headers[b"ETag"]
def _get_max_age(self, cc: dict[bytes, bytes | None]) -> int | None:
try:
return max(0, int(cc[b"max-age"])) # type: ignore[arg-type]
except (KeyError, ValueError):
return None
def _compute_freshness_lifetime(
self, response: Response, request: Request, now: float
) -> float:
# Reference nsHttpResponseHead::ComputeFreshnessLifetime
# https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#706
cc = self._parse_cachecontrol(response)
maxage = self._get_max_age(cc)
if maxage is not None:
return maxage
# Parse date header or synthesize it if none exists
date = rfc1123_to_epoch(response.headers.get(b"Date")) or now
# Try HTTP/1.0 Expires header
if b"Expires" in response.headers:
expires = rfc1123_to_epoch(response.headers[b"Expires"])
# When parsing Expires header fails RFC 2616 section 14.21 says we
# should treat this as an expiration time in the past.
return max(0, expires - date) if expires else 0
# Fallback to heuristic using last-modified header
# This is not in RFC but on Firefox caching implementation
lastmodified = rfc1123_to_epoch(response.headers.get(b"Last-Modified"))
if lastmodified and lastmodified <= date:
return (date - lastmodified) / 10
# This request can be cached indefinitely
if response.status in (300, 301, 308):
return self.MAXAGE
# Insufficient information to compute freshness lifetime
return 0
def _compute_current_age(
self, response: Response, request: Request, now: float
) -> float:
# Reference nsHttpResponseHead::ComputeCurrentAge
# https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#658
currentage: float = 0
# If Date header is not set we assume it is a fast connection, and
# clock is in sync with the server
date = rfc1123_to_epoch(response.headers.get(b"Date")) or now
if now > date:
currentage = now - date
if b"Age" in response.headers:
try:
age = int(response.headers[b"Age"]) # type: ignore[arg-type]
currentage = max(currentage, age)
except ValueError:
pass
return currentage
|
RFC2616Policy
|
python
|
astropy__astropy
|
astropy/coordinates/tests/test_earth.py
|
{
"start": 3476,
"end": 16723
}
|
class ____:
def setup_method(self):
self.lon = Longitude(
[0.0, 45.0, 90.0, 135.0, 180.0, -180, -90, -45],
u.deg,
wrap_angle=180 * u.deg,
)
self.lat = Latitude([+0.0, 30.0, 60.0, +90.0, -90.0, -60.0, -30.0, 0.0], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11.0, -0.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(
np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic, self.location.to_geodetic())
)
assert all(
np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric, self.location.to_geocentric())
)
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(
self.x.value, self.y.value, self.z.value, self.x.unit
)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(
self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m),
)
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value, self.location.lon.value)
assert allclose_m14(geodetic3.lat.value, self.location.lat.value)
assert not np.any(
isclose_m14(geodetic3.height.value, self.location.height.value)
)
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value, self.location.lon.value)
assert allclose_m14(geodetic4.lat.value, self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value, self.location.height[-1].value)
assert not np.any(
isclose_m14(geodetic4.height[:-1].value, self.location.height[:-1].value)
)
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
# incomprehensible by either raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError, match="should be in units of length"):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError, match="should all be consistent"):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value, self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(
self.lon, self.lat, self.h, ellipsoid="WGS72"
)
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == "WGS72"
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72["x"]
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h, ellipsoid="foo")
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid="foo")
with pytest.raises(ValueError):
self.location.ellipsoid = "foo"
with pytest.raises(ValueError):
self.location.to_geodetic("foo")
@pytest.mark.parametrize("ellipsoid", ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
# Some heights are very similar for some; some lon, lat identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(
self.lon, self.lat, self.h, ellipsoid=ellipsoid
)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in "x", "y", "z":
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.0
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.0 * u.m, 6000 * u.km, 6000 * u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon="149:3:57.9", lat="-31:16:37.3")
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1.0, 2.0] * u.deg, lat=[-1.0, 9.0] * u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
# TODO: this parametrize should include a second option with a valid Google API
# key. For example, we should make an API key for Astropy, and add it to GitHub Actions
# as an environment variable (for security).
@pytest.mark.parametrize("google_api_key", [None])
def test_of_address(google_api_key):
NYC_lon = -74.0 * u.deg
NYC_lat = 40.7 * u.deg
# ~10 km tolerance to address difference between OpenStreetMap and Google
# for "New York, NY". This doesn't matter in practice because this test is
# only used to verify that the query succeeded, not that the returned
# position is precise.
NYC_tol = 0.1 * u.deg
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# API limit might surface even here in CI.
if "unknown failure with" not in str(e):
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert np.allclose(loc.height.value, 0.0)
# Put this one here as buffer to get around Google map API limit per sec.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
if google_api_key is not None:
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# Buffer above sometimes insufficient to get around API limit but
# we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
pytest.xfail(str(e.value))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert quantity_allclose(loc.height, 10.438 * u.meter, atol=1.0 * u.cm)
def test_geodetic_tuple():
lat = 2 * u.deg
lon = 10 * u.deg
height = 100 * u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7 * u.deg, lat=37 * u.deg)
sometime = Time("2017-8-21 18:26:40")
zg0 = someloc.gravitational_redshift(sometime)
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.0 * u.mm / u.s < abs(zg_week - zg0) < 1 * u.cm / u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1 * u.cm / u.s < abs(zg_halfyear - zg0) < 1 * u.dm / u.s
# but when back to the same time in a year, should be tenths of mm
# even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert 0.1 * u.mm / u.s < abs(zg_year - zg0) < 1 * u.mm / u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {
"sun": constants.G * constants.M_sun,
"jupiter": 0 * constants.G * u.kg,
"moon": 0 * constants.G * u.kg,
}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert 0.1 * u.mm / u.s < abs(zg_moonjup - zg0) < 1 * u.mm / u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime, bodies=("sun",))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=("earth", "sun")
)
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses["earth"] = 0 * u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1 * u.dm / u.s < abs(zg_moonjupearth - zg0) < 1 * u.m / u.s
# If all masses are zero, redshift should be 0 as well.
masses["sun"] = 0 * u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=("saturn",))
with pytest.raises(u.UnitsError):
masses = {
"sun": constants.G * constants.M_sun,
"jupiter": constants.G * constants.M_jup,
"moon": 1 * u.km, # wrong units!
"earth": constants.G * constants.M_earth,
}
someloc.gravitational_redshift(sometime, masses=masses)
def test_read_only_input():
    """from_geodetic should accept read-only longitude/latitude arrays."""
    # 440 deg wraps around to 80 deg, so both entries describe the same longitude.
    longitude = np.array([80.0, 440.0]) * u.deg
    latitude = np.array([45.0]) * u.deg
    # Mark the underlying buffers immutable to mimic read-only input data.
    for angle in (longitude, latitude):
        angle.flags.writeable = False
    location = EarthLocation.from_geodetic(lon=longitude, lat=latitude)
    assert quantity_allclose(location[1].x, location[0].x)
|
TestInput
|
python
|
mlflow__mlflow
|
mlflow/transformers/__init__.py
|
{
"start": 73266,
"end": 132314
}
|
class ____:
def __init__(self, pipeline, flavor_config=None, model_config=None, prompt_template=None):
self.pipeline = pipeline
self.flavor_config = flavor_config
# The predict method updates the model_config several times. This should be done over a
# deep copy of the original model_config that was specified by the user, otherwise the
# prediction won't be idempotent. Hence we creates an immutable dictionary of the original
# model config here and enforce creating a deep copy at every predict call.
self.model_config = MappingProxyType(model_config or {})
self.prompt_template = prompt_template
self._conversation = None
# NB: Current special-case custom pipeline types that have not been added to
# the native-supported transformers package but require custom parsing:
# InstructionTextGenerationPipeline [Dolly] https://huggingface.co/databricks/dolly-v2-12b
# (and all variants)
self._supported_custom_generator_types = {"InstructionTextGenerationPipeline"}
self.llm_inference_task = (
self.flavor_config.get(_LLM_INFERENCE_TASK_KEY) if self.flavor_config else None
)
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.pipeline
def _convert_pandas_to_dict(self, data):
import transformers
if not isinstance(self.pipeline, transformers.ZeroShotClassificationPipeline):
return data.to_dict(orient="records")
else:
# NB: The ZeroShotClassificationPipeline requires an input in the form of
# Dict[str, Union[str, List[str]]] and will throw if an additional nested
# List is present within the List value (which is what the duplicated values
# within the orient="list" conversion in Pandas will do. This parser will
# deduplicate label lists to a single list.
unpacked = data.to_dict(orient="list")
parsed = {}
for key, value in unpacked.items():
if isinstance(value, list):
contents = []
for item in value:
# Deduplication logic
if item not in contents:
contents.append(item)
# Collapse nested lists to return the correct data structure for the
# ZeroShotClassificationPipeline input structure
parsed[key] = (
contents
if all(isinstance(item, str) for item in contents) and len(contents) > 1
else contents[0]
)
return parsed
def _merge_model_config_with_params(self, model_config, params):
if params:
_logger.warning(
"params provided to the `predict` method will override the inference "
"configuration saved with the model. If the params provided are not "
"valid for the pipeline, MlflowException will be raised."
)
# Override the inference configuration with any additional kwargs provided by the user.
return {**model_config, **params}
else:
return model_config
    def _validate_model_config_and_return_output(self, data, model_config, return_tensors=False):
        """Invoke the pipeline with the given config, translating known failures.

        Args:
            data: Input payload; a dict is expanded as keyword arguments,
                anything else is passed positionally.
            model_config: Keyword arguments forwarded to the pipeline call.
                NOTE: mutated in place when ``return_tensors`` is True.
            return_tensors: When True, force the pipeline to return tensors
                (used by LLM inference tasks to extract token information).

        Returns:
            The raw pipeline output.

        Raises:
            MlflowException: If the pipeline rejects the supplied kwargs, or if
                an audio pipeline cannot decode the payload.
        """
        import transformers

        if return_tensors:
            model_config["return_tensors"] = True
            if model_config.get("return_full_text", None) is not None:
                _logger.warning(
                    "The `return_full_text` parameter is mutually exclusive with the "
                    "`return_tensors` parameter set when a MLflow inference task is provided. "
                    "The `return_full_text` parameter will be ignored."
                )
                # `return_full_text` is mutually exclusive with `return_tensors`
                model_config["return_full_text"] = None
        try:
            if isinstance(data, dict):
                return self.pipeline(**data, **model_config)
            return self.pipeline(data, **model_config)
        except ValueError as e:
            # Surface invalid user-supplied kwargs as a parameter-value error.
            if "The following `model_kwargs` are not used by the model" in str(e):
                raise MlflowException.invalid_parameter_value(
                    "The params provided to the `predict` method are not valid "
                    f"for pipeline {type(self.pipeline).__name__}.",
                ) from e
            # Audio pipelines raise differently-worded errors across transformers
            # versions for undecodable audio; both spellings are matched here.
            if isinstance(
                self.pipeline,
                (
                    transformers.AutomaticSpeechRecognitionPipeline,
                    transformers.AudioClassificationPipeline,
                ),
            ) and (
                # transformers <= 4.33.3
                "Malformed soundfile" in str(e)
                # transformers > 4.33.3
                or "Soundfile is either not in the correct format or is malformed" in str(e)
            ):
                raise MlflowException.invalid_parameter_value(
                    "Failed to process the input audio data. Either the audio file is "
                    "corrupted or a uri was passed in without overriding the default model "
                    "signature. If submitting a string uri, please ensure that the model has "
                    "been saved with a signature that defines a string input type.",
                ) from e
            raise
    def predict(self, data, params: dict[str, Any] | None = None):
        """
        Perform inference with the wrapped pipeline.

        Args:
            data: Model input data. Accepted containers are a pandas DataFrame,
                dict, str, bytes, np.ndarray, or a list of str/dict entries.
            params: Additional parameters to pass to the model for inference.
                These override the saved model config for this call only.

        Returns:
            Model predictions; the concrete type depends on the pipeline.

        Raises:
            MlflowException: If the input container or its element types are
                unsupported.
        """
        # NB: This `predict` method updates the model_config several times. To make the predict
        # call idempotent, we keep the original self.model_config immutable and creates a deep
        # copy of it at every predict call.
        model_config = copy.deepcopy(dict(self.model_config))
        # NOTE: after this call `params` holds the merged model config, which is
        # what `_predict` receives as its `model_config` argument below.
        params = self._merge_model_config_with_params(model_config, params)

        # For llm/v1/* tasks, convert the request payload into raw pipeline
        # inputs before the container-type dispatch below.
        if self.llm_inference_task == _LLM_INFERENCE_TASK_CHAT:
            data, params = preprocess_llm_inference_input(data, params, self.flavor_config)
            data = [convert_messages_to_prompt(msgs, self.pipeline.tokenizer) for msgs in data]
        elif self.llm_inference_task == _LLM_INFERENCE_TASK_COMPLETIONS:
            data, params = preprocess_llm_inference_input(data, params, self.flavor_config)
        elif self.llm_inference_task == _LLM_INFERENCE_TASK_EMBEDDING:
            data, params = preprocess_llm_embedding_params(data)

        if isinstance(data, pd.DataFrame):
            input_data = self._convert_pandas_to_dict(data)
        elif isinstance(data, (dict, str, bytes, np.ndarray)):
            input_data = data
        elif isinstance(data, list):
            if not all(isinstance(entry, (str, dict)) for entry in data):
                raise MlflowException(
                    "Invalid data submission. Ensure all elements in the list are strings "
                    "or dictionaries. If dictionaries are supplied, all keys in the "
                    "dictionaries must be strings and values must be either str or List[str].",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            input_data = data
        else:
            raise MlflowException(
                "Input data must be either a pandas.DataFrame, a string, bytes, List[str], "
                "List[Dict[str, str]], List[Dict[str, Union[str, List[str]]]], "
                "or Dict[str, Union[str, List[str]]].",
                error_code=INVALID_PARAMETER_VALUE,
            )
        input_data = self._parse_raw_pipeline_input(input_data)
        # Validate resolved or input dict types
        if isinstance(input_data, dict):
            _validate_input_dictionary_contains_only_strings_and_lists_of_strings(input_data)
        elif isinstance(input_data, list) and all(isinstance(entry, dict) for entry in input_data):
            # Validate each dict inside an input List[Dict]
            # NOTE(review): `all(...)` is used only to drive the validator over
            # each entry; its boolean result is intentionally discarded.
            all(
                _validate_input_dictionary_contains_only_strings_and_lists_of_strings(x)
                for x in input_data
            )

        return self._predict(input_data, params)
    def _predict(self, data, model_config):
        """Dispatch parsed input to the pipeline and post-process its output.

        Args:
            data: Parsed input from ``predict``.
            model_config: Merged inference configuration (saved config overlaid
                with per-call params). ``include_prompt`` and
                ``collapse_whitespace`` are popped from it here.

        Returns:
            Post-processed predictions; the concrete type depends on the
            pipeline (str, list, JSON string, np.ndarray, or pd.DataFrame).

        Raises:
            MlflowException: If the pipeline type is not supported for pyfunc
                predict.
        """
        import transformers

        # Stage 1: per-pipeline-type input parsing and output-key selection.
        # NB: the ordering of these conditional statements matters. TranslationPipeline and
        # SummarizationPipeline both inherit from TextGenerationPipeline (they are subclasses)
        # in which the return data structure from their __call__ implementation is modified.
        if isinstance(self.pipeline, transformers.TranslationPipeline):
            self._validate_str_or_list_str(data)
            output_key = "translation_text"
        elif isinstance(self.pipeline, transformers.SummarizationPipeline):
            self._validate_str_or_list_str(data)
            data = self._format_prompt_template(data)
            output_key = "summary_text"
        elif isinstance(self.pipeline, transformers.Text2TextGenerationPipeline):
            data = self._parse_text2text_input(data)
            data = self._format_prompt_template(data)
            output_key = "generated_text"
        elif isinstance(self.pipeline, transformers.TextGenerationPipeline):
            self._validate_str_or_list_str(data)
            data = self._format_prompt_template(data)
            output_key = "generated_text"
        elif isinstance(self.pipeline, transformers.QuestionAnsweringPipeline):
            data = self._parse_question_answer_input(data)
            output_key = "answer"
        elif isinstance(self.pipeline, transformers.FillMaskPipeline):
            self._validate_str_or_list_str(data)
            data = self._format_prompt_template(data)
            output_key = "token_str"
        elif isinstance(self.pipeline, transformers.TextClassificationPipeline):
            output_key = "label"
        elif isinstance(self.pipeline, transformers.ImageClassificationPipeline):
            data = self._convert_image_input(data)
            output_key = "label"
        elif isinstance(self.pipeline, transformers.ZeroShotClassificationPipeline):
            output_key = "labels"
            data = self._parse_json_encoded_list(data, "candidate_labels")
        elif isinstance(self.pipeline, transformers.TableQuestionAnsweringPipeline):
            output_key = "answer"
            data = self._parse_json_encoded_dict_payload_to_dict(data, "table")
        elif isinstance(self.pipeline, transformers.TokenClassificationPipeline):
            # A set of candidate keys: the actual key is resolved later from the
            # pipeline's output entries in `_parse_tokenizer_output`.
            output_key = {"entity_group", "entity"}
        elif isinstance(self.pipeline, transformers.FeatureExtractionPipeline):
            output_key = None
            data = self._parse_feature_extraction_input(data)
            data = self._format_prompt_template(data)
        elif _is_conversational_pipeline(self.pipeline):
            output_key = None
            if not self._conversation:
                # this import is valid if conversational_pipeline is not None
                self._conversation = transformers.Conversation()
            self._conversation.add_user_input(data)
        elif type(self.pipeline).__name__ in self._supported_custom_generator_types:
            self._validate_str_or_list_str(data)
            output_key = "generated_text"
        elif isinstance(self.pipeline, transformers.AutomaticSpeechRecognitionPipeline):
            if model_config.get("return_timestamps", None) in ["word", "char"]:
                output_key = None
            else:
                output_key = "text"
            data = self._convert_audio_input(data)
        elif isinstance(self.pipeline, transformers.AudioClassificationPipeline):
            data = self._convert_audio_input(data)
            output_key = None
        else:
            raise MlflowException(
                f"The loaded pipeline type {type(self.pipeline).__name__} is "
                "not enabled for pyfunc predict functionality.",
                error_code=BAD_REQUEST,
            )

        # Optional input preservation for specific pipeline types. This is True (include raw
        # formatting output), but if `include_prompt` is set to False in the `model_config`
        # option during model saving, excess newline characters and the fed-in prompt will be
        # trimmed out from the start of the response.
        include_prompt = model_config.pop("include_prompt", True)
        # Optional stripping out of `\n` for specific generator pipelines.
        collapse_whitespace = model_config.pop("collapse_whitespace", False)

        data = self._convert_cast_lists_from_np_back_to_list(data)

        # Stage 2: generate inference data with the pipeline object
        if _is_conversational_pipeline(self.pipeline):
            conversation_output = self.pipeline(self._conversation)
            return conversation_output.generated_responses[-1]
        else:
            # If inference task is defined, return tensors internally to get usage information
            return_tensors = False
            if self.llm_inference_task:
                return_tensors = True
                output_key = "generated_token_ids"
            raw_output = self._validate_model_config_and_return_output(
                data, model_config=model_config, return_tensors=return_tensors
            )

        # Stage 3: handle the pipeline outputs
        if type(self.pipeline).__name__ in self._supported_custom_generator_types or isinstance(
            self.pipeline, transformers.TextGenerationPipeline
        ):
            output = self._strip_input_from_response_in_instruction_pipelines(
                data,
                raw_output,
                output_key,
                self.flavor_config,
                include_prompt,
                collapse_whitespace,
            )

            if self.llm_inference_task:
                output = postprocess_output_for_llm_inference_task(
                    data,
                    output,
                    self.pipeline,
                    self.flavor_config,
                    model_config,
                    self.llm_inference_task,
                )
        elif isinstance(self.pipeline, transformers.FeatureExtractionPipeline):
            if self.llm_inference_task:
                output = [np.array(tensor[0][0]) for tensor in raw_output]
                output = postprocess_output_for_llm_v1_embedding_task(
                    data, output, self.pipeline.tokenizer
                )
            else:
                return self._parse_feature_extraction_output(raw_output)
        elif isinstance(self.pipeline, transformers.FillMaskPipeline):
            output = self._parse_list_of_multiple_dicts(raw_output, output_key)
        elif isinstance(self.pipeline, transformers.ZeroShotClassificationPipeline):
            return self._flatten_zero_shot_text_classifier_output_to_df(raw_output)
        elif isinstance(self.pipeline, transformers.TokenClassificationPipeline):
            output = self._parse_tokenizer_output(raw_output, output_key)
        elif isinstance(
            self.pipeline, transformers.AutomaticSpeechRecognitionPipeline
        ) and model_config.get("return_timestamps", None) in ["word", "char"]:
            output = json.dumps(raw_output)
        elif isinstance(
            self.pipeline,
            (
                transformers.AudioClassificationPipeline,
                transformers.TextClassificationPipeline,
                transformers.ImageClassificationPipeline,
            ),
        ):
            return pd.DataFrame(raw_output)
        else:
            output = self._parse_lists_of_dict_to_list_of_str(raw_output, output_key)

        sanitized = self._sanitize_output(output, data)
        return self._wrap_strings_as_list_if_scalar(sanitized)
    def _parse_raw_pipeline_input(self, data):
        """
        Converts inputs to the expected types for specific Pipeline types.

        Specific logic for individual pipeline types are called via their respective methods if
        the input isn't a basic str or List[str] input type of Pipeline.

        These parsers are required due to the conversion that occurs within schema validation to
        a Pandas DataFrame encapsulation, a format which is unsupported for the `transformers`
        library.

        Args:
            data: The container produced by ``predict``'s type dispatch.

        Returns:
            The input coerced into the shape the pipeline expects; unmatched
            pipeline/input combinations pass through unchanged.
        """
        import transformers

        if isinstance(self.pipeline, transformers.TableQuestionAnsweringPipeline):
            data = self._coerce_exploded_dict_to_single_dict(data)
            return self._parse_input_for_table_question_answering(data)
        elif _is_conversational_pipeline(self.pipeline):
            return self._parse_conversation_input(data)
        elif (  # noqa: SIM114
            isinstance(
                self.pipeline,
                (
                    transformers.FillMaskPipeline,
                    transformers.TextGenerationPipeline,
                    transformers.TranslationPipeline,
                    transformers.SummarizationPipeline,
                    transformers.TokenClassificationPipeline,
                ),
            )
            and isinstance(data, list)
            and all(isinstance(entry, dict) for entry in data)
        ):
            # DataFrame-derived List[Dict] rows each hold a single value; keep
            # only the first value of each row.
            return [list(entry.values())[0] for entry in data]
        # NB: For Text2TextGenerationPipeline, we need more complex handling for dictionary,
        # as we allow both single string input and dictionary input (or list of them). Both
        # are once wrapped to Pandas DataFrame during schema enforcement and convert back to
        # dictionary. The difference between two is columns of the DataFrame, where the first
        # case (string) will have auto-generated columns like 0, 1, ... while the latter (dict)
        # will have the original keys to be the columns. When converting back to dictionary,
        # those columns will becomes the key of dictionary.
        #
        # E.g.
        #  1. If user's input is string like model.predict("foo")
        #    -> Raw input: "foo"
        #    -> Pandas dataframe has column 0, with single row "foo"
        #    -> Derived dictionary will be {0: "foo"}
        #  2. If user's input is dictionary like model.predict({"text": "foo"})
        #    -> Raw input: {"text": "foo"}
        #    -> Pandas dataframe has column "text", with single row "foo"
        #    -> Derived dictionary will be {"text": "foo"}
        #
        # Then for the first case, we want to extract values only, similar to other pipelines.
        # However, for the second case, we want to keep the key-value pair as it is.
        # In long-term, we should definitely change the upstream handling to avoid this
        # complexity, but here we just try to make it work by checking if the key is auto-generated.
        elif (
            isinstance(self.pipeline, transformers.Text2TextGenerationPipeline)
            and isinstance(data, list)
            and all(isinstance(entry, dict) for entry in data)
            # Pandas Dataframe derived dictionary will have integer key (row index)
            and 0 in data[0].keys()
        ):
            return [list(entry.values())[0] for entry in data]
        elif isinstance(self.pipeline, transformers.TextClassificationPipeline):
            return self._validate_text_classification_input(data)
        else:
            return data
    @staticmethod
    def _validate_text_classification_input(data):
        """
        Perform input type validation for TextClassification pipelines and casting of data
        that is manipulated internally by the MLflow model server back to a structure that
        can be used for pipeline inference.

        To illustrate the input and outputs of this function, for the following inputs to
        the pyfunc.predict() call for this pipeline type:

        "text to classify"
        ["text to classify", "other text to classify"]
        {"text": "text to classify", "text_pair": "pair text"}
        [{"text": "text", "text_pair": "pair"}, {"text": "t", "text_pair": "tp" }]

        Pyfunc processing will convert these to the following structures:

        [{0: "text to classify"}]
        [{0: "text to classify"}, {0: "other text to classify"}]
        [{"text": "text to classify", "text_pair": "pair text"}]
        [{"text": "text", "text_pair": "pair"}, {"text": "t", "text_pair": "tp" }]

        The purpose of this function is to convert them into the correct format for input
        to the pipeline (wrapping as a list has no bearing on the correctness of the
        inferred classifications):

        ["text to classify"]
        ["text to classify", "other text to classify"]
        [{"text": "text to classify", "text_pair": "pair text"}]
        [{"text": "text", "text_pair": "pair"}, {"text": "t", "text_pair": "tp" }]

        Additionally, for dict input types (the 'text' & 'text_pair' input example), the dict
        input will be JSON stringified within MLflow model serving. In order to reconvert this
        structure back into the appropriate type, we use ast.literal_eval() to convert back
        to a dict. We avoid using JSON.loads() due to pandas DataFrame conversions that invert
        single and double quotes with escape sequences that are not consistent if the string
        contains escaped quotes.

        Raises:
            MlflowException: If ``data`` is not a str, dict, list of str, or
                list of dict.
        """

        def _check_keys(payload):
            """Check if a dictionary contains only allowable keys."""
            allowable_str_keys = {"text", "text_pair"}
            # Integer keys are the auto-generated DataFrame row-index form and
            # are tolerated; string keys must be a subset of the allowed set.
            if set(payload) - allowable_str_keys and not all(
                isinstance(key, int) for key in payload.keys()
            ):
                raise MlflowException(
                    "Text Classification pipelines may only define dictionary inputs with keys "
                    f"defined as {allowable_str_keys}"
                )

        if isinstance(data, str):
            return data
        elif isinstance(data, dict):
            _check_keys(data)
            return data
        elif isinstance(data, list):
            if all(isinstance(item, str) for item in data):
                return data
            elif all(isinstance(item, dict) for item in data):
                for payload in data:
                    _check_keys(payload)
                # A 0 key marks the auto-generated column form; unwrap to the
                # raw string values.
                if list(data[0].keys())[0] == 0:
                    data = [item[0] for item in data]
                try:
                    # NB: To support MLflow serving signature validation, the value within dict
                    # inputs is JSON encoded. In order for the proper data structure input support
                    # for a {"text": "a", "text_pair": "b"} (or the list of such a structure) as
                    # an input, we have to convert the string encoded dict back to a dict.
                    # Due to how unescaped characters (such as "'") are encoded, using an explicit
                    # json.loads() attempted cast can result in invalid input data to the pipeline.
                    # ast.literal_eval() shows correct conversion, as validated in unit tests.
                    return [ast.literal_eval(s) for s in data]
                except (ValueError, SyntaxError):
                    # Entries were plain strings, not stringified dicts.
                    return data
            else:
                raise MlflowException(
                    "An unsupported data type has been passed for Text Classification inference. "
                    "Only str, list of str, dict, and list of dict are supported."
                )
        else:
            raise MlflowException(
                "An unsupported data type has been passed for Text Classification inference. "
                "Only str, list of str, dict, and list of dict are supported."
            )
    def _parse_conversation_input(self, data) -> str:
        """Extract the single user message for a conversational pipeline.

        Accepts a raw string, a list of dicts (only the first dict's first
        value is used), or a dict (first value wins).

        NOTE(review): any other input type falls through every branch and
        implicitly returns None — confirm whether that is intentional upstream.
        """
        if isinstance(data, str):
            return data
        elif isinstance(data, list) and all(isinstance(elem, dict) for elem in data):
            return next(iter(data[0].values()))
        elif isinstance(data, dict):
            # The conversation pipeline can only accept a single string at a time
            return next(iter(data.values()))
def _parse_input_for_table_question_answering(self, data):
if "table" not in data:
raise MlflowException(
"The input dictionary must have the 'table' key.",
error_code=INVALID_PARAMETER_VALUE,
)
elif isinstance(data["table"], dict):
data["table"] = json.dumps(data["table"])
return data
else:
return data
def _coerce_exploded_dict_to_single_dict(
self, data: list[dict[str, Any]]
) -> dict[str, list[Any]]:
"""
Parses the result of Pandas DataFrame.to_dict(orient="records") from pyfunc
signature validation to coerce the output to the required format for a
Pipeline that requires a single dict with list elements such as
TableQuestionAnsweringPipeline.
Example input:
[
{"answer": "We should order more pizzas to meet the demand."},
{"answer": "The venue size should be updated to handle the number of guests."},
]
Output:
{
"answer": [
"We should order more pizzas to meet the demand.",
"The venue size should be updated to handle the number of guests.",
]
}
"""
if isinstance(data, list) and all(isinstance(item, dict) for item in data):
collection = data.copy()
parsed = collection[0]
for coll in collection:
for key, value in coll.items():
if key not in parsed:
raise MlflowException(
"Unable to parse the input. The keys within each "
"dictionary of the parsed input are not consistent"
"among the dictionaries.",
error_code=INVALID_PARAMETER_VALUE,
)
if value != parsed[key]:
value_type = type(parsed[key])
if value_type == str:
parsed[key] = [parsed[key], value]
elif value_type == list:
if all(len(entry) == 1 for entry in value):
# This conversion is required solely for model serving.
# In the parsing logic that occurs internally, strings that
# contain single quotes `'` result in casting to a List[char]
# instead of a str type. Attempting to append a List[char]
# to a List[str] as would happen in the `else` block here
# results in the entire List being overwritten as `None` without
# an Exception being raised. By checking for single value entries
# and subsequently converting to list and extracting the first
# element reconstructs the original input string.
parsed[key].append([str(value)][0])
else:
parsed[key] = parsed[key].append(value)
else:
parsed[key] = value
return parsed
else:
return data
def _flatten_zero_shot_text_classifier_output_to_df(self, data):
"""
Converts the output of sequences, labels, and scores to a Pandas DataFrame output.
Example input:
[{'sequence': 'My dog loves to eat spaghetti',
'labels': ['happy', 'sad'],
'scores': [0.9896970987319946, 0.010302911512553692]},
{'sequence': 'My dog hates going to the vet',
'labels': ['sad', 'happy'],
'scores': [0.957074761390686, 0.042925238609313965]}]
Output:
pd.DataFrame in a fully normalized (flattened) format with each sequence, label, and score
having a row entry.
For example, here is the DataFrame output:
sequence labels scores
0 My dog loves to eat spaghetti happy 0.989697
1 My dog loves to eat spaghetti sad 0.010303
2 My dog hates going to the vet sad 0.957075
3 My dog hates going to the vet happy 0.042925
"""
if isinstance(data, list) and not all(isinstance(item, dict) for item in data):
raise MlflowException(
"Encountered an unknown return type from the pipeline type "
f"{type(self.pipeline).__name__}. Expecting a List[Dict]",
error_code=BAD_REQUEST,
)
if isinstance(data, dict):
data = [data]
flattened_data = []
for entry in data:
for label, score in zip(entry["labels"], entry["scores"]):
flattened_data.append(
{"sequence": entry["sequence"], "labels": label, "scores": score}
)
return pd.DataFrame(flattened_data)
    def _strip_input_from_response_in_instruction_pipelines(
        self,
        input_data,
        output,
        output_key,
        flavor_config,
        include_prompt=True,
        collapse_whitespace=False,
    ):
        """
        Parse the output from instruction pipelines to conform with other text generator
        pipeline types and remove line feed characters and other confusing outputs.

        Args:
            input_data: The prompt(s) fed to the pipeline (str or List[str]).
            output: Raw pipeline output, List[Dict] or List[List[Dict]].
            output_key: Dict key holding the generated text in each entry.
            flavor_config: Flavor config; its instance type decides whether
                prompt trimming applies.
            include_prompt: When False, strip the echoed prompt from the start
                of the response for supported custom generator pipelines.
            collapse_whitespace: When True, collapse runs of whitespace in the
                response to single spaces.

        Raises:
            MlflowException: If the output (or input/output pairing) does not
                match the expected structure.
        """

        def extract_response_data(data_out):
            # Flatten List[Dict] to the single generated string, or
            # List[List[Dict]] to one string per inner collection.
            if all(isinstance(x, dict) for x in data_out):
                return [elem[output_key] for elem in data_out][0]
            elif all(isinstance(x, list) for x in data_out):
                return [elem[output_key] for coll in data_out for elem in coll]
            else:
                raise MlflowException(
                    "Unable to parse the pipeline output. Expected List[Dict[str,str]] or "
                    f"List[List[Dict[str,str]]] but got {type(data_out)} instead."
                )

        output = extract_response_data(output)

        def trim_input(data_in, data_out):
            # NB: the '\n\n' pattern is exclusive to specific InstructionalTextGenerationPipeline
            # types that have been loaded as a plain TextGenerator. The structure of these
            # pipelines will precisely repeat the input question immediately followed by 2 carriage
            # return statements, followed by the start of the response to the prompt. We only
            # want to left-trim these types of pipelines output values if the user has indicated
            # the removal action of the input prompt in the returned str or List[str] by applying
            # the optional model_config entry of `{"include_prompt": False}`.
            # By default, the prompt is included in the response.
            # Stripping out additional carriage returns (\n) is another additional optional flag
            # that can be set for these generator pipelines. It is off by default (False).
            if (
                not include_prompt
                and flavor_config[FlavorKey.INSTANCE_TYPE] in self._supported_custom_generator_types
                and data_out.startswith(data_in + "\n\n")
            ):
                # If the user has indicated to not preserve the prompt input in the response,
                # split the response output and trim the input prompt from the response.
                data_out = data_out[len(data_in) :].lstrip()
                if data_out.startswith("A:"):
                    data_out = data_out[2:].lstrip()

            # If the user has indicated to remove newlines and extra spaces from the generated
            # text, replace them with a single space.
            if collapse_whitespace:
                data_out = re.sub(r"\s+", " ", data_out).strip()
            return data_out

        if isinstance(input_data, list) and isinstance(output, list):
            return [trim_input(data_in, data_out) for data_in, data_out in zip(input_data, output)]
        elif isinstance(input_data, str) and isinstance(output, str):
            return trim_input(input_data, output)
        else:
            raise MlflowException(
                "Unknown data structure after parsing output. Expected str or List[str]. "
                f"Got {type(output)} instead."
            )
def _sanitize_output(self, output, input_data):
# Some pipelines and their underlying models leave leading or trailing whitespace.
# This method removes that whitespace.
import transformers
if (
not isinstance(self.pipeline, transformers.TokenClassificationPipeline)
and isinstance(input_data, str)
and isinstance(output, list)
):
# Retrieve the first output for return types that are List[str] of only a single
# element.
output = output[0]
if isinstance(output, str):
return output.strip()
elif isinstance(output, list):
if all(isinstance(elem, str) for elem in output):
cleaned = [text.strip() for text in output]
# If the list has only a single string, return as string.
return cleaned if len(cleaned) > 1 else cleaned[0]
else:
return [self._sanitize_output(coll, input_data) for coll in output]
elif isinstance(output, dict) and all(
isinstance(key, str) and isinstance(value, str) for key, value in output.items()
):
return {k: v.strip() for k, v in output.items()}
else:
return output
@staticmethod
def _wrap_strings_as_list_if_scalar(output_data):
"""
Wraps single string outputs in a list to support batch processing logic in serving.
Scalar values are not supported for processing in batch logic as they cannot be coerced
to DataFrame representations.
"""
if isinstance(output_data, str):
return [output_data]
else:
return output_data
def _parse_lists_of_dict_to_list_of_str(self, output_data, target_dict_key) -> list[str]:
"""
Parses the output results from select Pipeline types to extract specific values from a
target key.
Examples (with "a" as the `target_dict_key`):
Input: [{"a": "valid", "b": "invalid"}, {"a": "another valid", "c": invalid"}]
Output: ["valid", "another_valid"]
Input: [{"a": "valid", "b": [{"a": "another valid"}, {"b": "invalid"}]},
{"a": "valid 2", "b": [{"a": "another valid 2"}, {"c": "invalid"}]}]
Output: ["valid", "another valid", "valid 2", "another valid 2"]
"""
if isinstance(output_data, list):
output_coll = []
for output in output_data:
if isinstance(output, dict):
for key, value in output.items():
if key == target_dict_key:
output_coll.append(output[target_dict_key])
elif isinstance(value, list) and all(
isinstance(elem, dict) for elem in value
):
output_coll.extend(
self._parse_lists_of_dict_to_list_of_str(value, target_dict_key)
)
elif isinstance(output, list):
output_coll.extend(
self._parse_lists_of_dict_to_list_of_str(output, target_dict_key)
)
return output_coll
elif target_dict_key:
return output_data[target_dict_key]
else:
return output_data
@staticmethod
def _parse_feature_extraction_input(input_data):
if isinstance(input_data, list) and isinstance(input_data[0], dict):
return [list(data.values())[0] for data in input_data]
else:
return input_data
@staticmethod
def _parse_feature_extraction_output(output_data):
"""
Parse the return type from a FeatureExtractionPipeline output. The mixed types for
input are present depending on how the pyfunc is instantiated. For model serving usage,
the returned type from MLServer will be a numpy.ndarray type, otherwise, the return
within a manually executed pyfunc (i.e., for udf usage), the return will be a collection
of nested lists.
Examples:
Input: [[[0.11, 0.98, 0.76]]] or np.array([0.11, 0.98, 0.76])
Output: np.array([0.11, 0.98, 0.76])
Input: [[[[0.1, 0.2], [0.3, 0.4]]]] or
np.array([np.array([0.1, 0.2]), np.array([0.3, 0.4])])
Output: np.array([np.array([0.1, 0.2]), np.array([0.3, 0.4])])
"""
if isinstance(output_data, np.ndarray):
return output_data
else:
return np.array(output_data[0][0])
def _parse_tokenizer_output(self, output_data, target_set):
"""
Parses the tokenizer pipeline output.
Examples:
Input: [{"entity": "PRON", "score": 0.95}, {"entity": "NOUN", "score": 0.998}]
Output: "PRON,NOUN"
Input: [[{"entity": "PRON", "score": 0.95}, {"entity": "NOUN", "score": 0.998}],
[{"entity": "PRON", "score": 0.95}, {"entity": "NOUN", "score": 0.998}]]
Output: ["PRON,NOUN", "PRON,NOUN"]
"""
# NB: We're collapsing the results here to a comma separated string for each inference
# input string. This is to simplify having to otherwise make extensive changes to
# ColSpec in order to support schema enforcement of List[List[str]]
if isinstance(output_data[0], list):
return [self._parse_tokenizer_output(coll, target_set) for coll in output_data]
else:
# NB: Since there are no attributes accessible from the pipeline object that determine
# what the characteristics of the return structure names are within the dictionaries,
# Determine which one is present in the output to extract the correct entries.
target = target_set.intersection(output_data[0].keys()).pop()
return ",".join([coll[target] for coll in output_data])
@staticmethod
def _parse_list_of_multiple_dicts(output_data, target_dict_key):
"""
Returns the first value of the `target_dict_key` that matches in the first dictionary in a
list of dictionaries.
"""
def fetch_target_key_value(data, key):
if isinstance(data[0], dict):
return data[0][key]
return [item[0][key] for item in data]
if isinstance(output_data[0], list):
return [
fetch_target_key_value(collection, target_dict_key) for collection in output_data
]
else:
return [output_data[0][target_dict_key]]
def _parse_question_answer_input(self, data):
"""
Parses the single string input representation for a question answer pipeline into the
required dict format for a `question-answering` pipeline.
"""
if isinstance(data, list):
return [self._parse_question_answer_input(entry) for entry in data]
elif isinstance(data, dict):
expected_keys = {"question", "context"}
if not expected_keys.intersection(set(data.keys())) == expected_keys:
raise MlflowException(
f"Invalid keys were submitted. Keys must be exclusively {expected_keys}"
)
return data
else:
raise MlflowException(
"An invalid type has been supplied. Must be either List[Dict[str, str]] or "
f"Dict[str, str]. {type(data)} is not supported.",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_text2text_input(self, data):
"""
Parses the mixed input types that can be submitted into a text2text Pipeline.
Valid examples:
Input:
{"context": "abc", "answer": "def"}
Output:
"context: abc answer: def"
Input:
[{"context": "abc", "answer": "def"}, {"context": "ghi", "answer": "jkl"}]
Output:
["context: abc answer: def", "context: ghi answer: jkl"]
Input:
"abc"
Output:
"abc"
Input:
["abc", "def"]
Output:
["abc", "def"]
"""
if isinstance(data, dict) and all(isinstance(value, str) for value in data.values()):
if all(isinstance(key, str) for key in data) and "inputs" not in data:
# NB: Text2Text Pipelines require submission of text in a pseudo-string based dict
# formatting.
# As an example, for the input of:
# data = {"context": "The sky is blue", "answer": "blue"}
# This method will return the Pipeline-required format of:
# "context: The sky is blue. answer: blue"
return " ".join(f"{key}: {value}" for key, value in data.items())
else:
return list(data.values())
elif isinstance(data, list) and all(isinstance(value, dict) for value in data):
return [self._parse_text2text_input(entry) for entry in data]
elif isinstance(data, str) or (
isinstance(data, list) and all(isinstance(value, str) for value in data)
):
return data
else:
raise MlflowException(
f"An invalid type has been supplied: {_truncate_and_ellipsize(data, 100)} "
f"(type: {type(data).__name__}). Please supply a Dict[str, str], str, List[str], "
"or a List[Dict[str, str]] for a Text2Text Pipeline.",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_json_encoded_list(self, data, key_to_unpack):
    """
    Parses the complex input types for pipelines such as ZeroShotClassification in which
    the required input type is Dict[str, Union[str, List[str]]] wherein the list
    provided is encoded as JSON. This method unpacks that string to the required
    elements.
    """
    if isinstance(data, list):
        # Batch submission: unpack each record independently.
        return [self._parse_json_encoded_list(entry, key_to_unpack) for entry in data]
    elif isinstance(data, dict):
        if key_to_unpack not in data:
            raise MlflowException(
                "Invalid key in inference payload. The expected inference data key "
                f"is: {key_to_unpack}",
                error_code=INVALID_PARAMETER_VALUE,
            )
        if isinstance(data[key_to_unpack], str):
            try:
                # Only the target key is JSON-decoded; every other entry passes through.
                return {
                    k: (json.loads(v) if k == key_to_unpack else v) for k, v in data.items()
                }
            except json.JSONDecodeError:
                # The value was a plain (non-JSON) string; leave the payload untouched.
                return data
        elif isinstance(data[key_to_unpack], list):
            # Already a decoded list; nothing to unpack.
            return data
        # NOTE(review): a dict whose target value is neither str nor list — and any
        # input that is neither list nor dict — falls through and returns None
        # implicitly. Confirm with callers that this is intentional.
@staticmethod
def _parse_json_encoded_dict_payload_to_dict(data, key_to_unpack):
    """
    Parses complex dict input types that have been json encoded. Pipelines like
    TableQuestionAnswering require such input types.

    Only the value stored under ``key_to_unpack`` is decoded; all other entries
    are passed through unchanged.
    """
    if isinstance(data, list):
        # Batch of records: JSON-decode the target key in each record.
        return [
            {
                key: (
                    json.loads(value)
                    if key == key_to_unpack and isinstance(value, str)
                    else value
                )
                for key, value in entry.items()
            }
            for entry in data
        ]
    elif isinstance(data, dict):
        # This is to handle serving use cases as the DataFrame encapsulation converts
        # collections within rows to np.array type. In order to process this data through
        # the transformers.Pipeline API, we need to cast these arrays back to lists
        # and replace the single quotes with double quotes after extracting the
        # json-encoded `table` (a pandas DF) in order to convert it to a dict that
        # the TableQuestionAnsweringPipeline can accept and cast to a Pandas DataFrame.
        #
        # An example casting that occurs for this case when input to model serving is the
        # conversion of a user input of:
        #   '{"inputs": {"query": "What is the longest distance?",
        #                "table": {"Distance": ["1000", "10", "1"]}}}'
        # is converted to:
        #   [{'query': array('What is the longest distance?', dtype='<U29'),
        #     'table': array('{\'Distance\': [\'1000\', \'10\', \'1\']}', dtype='U<204')}]
        # which is an invalid input to the pipeline.
        # this method converts the input to:
        #   {'query': 'What is the longest distance?',
        #    'table': {'Distance': ['1000', '10', '1']}}
        # which is a valid input to the TableQuestionAnsweringPipeline.
        output = {}
        for key, value in data.items():
            if key == key_to_unpack:
                # literal_eval (not json.loads) because the stringified dict uses
                # single quotes after the DataFrame round-trip.
                if isinstance(value, np.ndarray):
                    output[key] = ast.literal_eval(value.item())
                else:
                    output[key] = ast.literal_eval(value)
            else:
                if isinstance(value, np.ndarray):
                    # This cast to np.ndarray occurs when more than one question is asked.
                    output[key] = value.item()
                else:
                    # Otherwise, the entry does not need casting from a np.ndarray type to
                    # list as it is already a scalar string.
                    output[key] = value
        return output
    else:
        # Fallback for other mapping-like payloads: JSON-decode only the target key.
        return {
            key: (
                json.loads(value) if key == key_to_unpack and isinstance(value, str) else value
            )
            for key, value in data.items()
        }
@staticmethod
def _validate_str_or_list_str(data):
if not isinstance(data, (str, list)):
raise MlflowException(
f"The input data is of an incorrect type. {type(data)} is invalid. "
"Must be either string or List[str]",
error_code=INVALID_PARAMETER_VALUE,
)
elif isinstance(data, list) and not all(isinstance(entry, str) for entry in data):
raise MlflowException(
"If supplying a list, all values must be of string type.",
error_code=INVALID_PARAMETER_VALUE,
)
@staticmethod
def _convert_cast_lists_from_np_back_to_list(data):
"""
This handles the casting of dicts within lists from Pandas DF conversion within model
serving back into the required Dict[str, List[str]] if this type matching occurs.
Otherwise, it's a noop.
"""
if not isinstance(data, list):
# NB: applying a short-circuit return here to not incur runtime overhead with
# type validation if the input is not a list
return data
elif not all(isinstance(value, dict) for value in data):
return data
else:
parsed_data = []
for entry in data:
if all(isinstance(value, np.ndarray) for value in entry.values()):
parsed_data.append({key: value.tolist() for key, value in entry.items()})
else:
parsed_data.append(entry)
return parsed_data
@staticmethod
def is_base64_image(image):
    """Check whether ``image`` is a base64 encoded string (either standard or
    MIME/newline-wrapped encoding)."""
    try:
        decoded = base64.b64decode(image)
    except binascii.Error:
        # Not decodable at all -> definitely not base64.
        return False
    # Round-trip: a true base64 payload re-encodes to the exact original string.
    standard_form = base64.b64encode(decoded).decode("utf-8")
    mime_form = base64.encodebytes(decoded).decode("utf-8")
    return image in (standard_form, mime_form)
def _convert_image_input(self, input_data):
    """
    Conversion utility for decoding the base64 encoded bytes data of a raw image file when
    parsed through model serving, if applicable. Direct usage of the pyfunc implementation
    outside of model serving will treat this utility as a noop.

    For reference, the expected encoding for input to Model Serving will be:

    import requests
    import base64

    response = requests.get("https://www.my.images/a/sound/file.jpg")
    encoded_image = base64.b64encode(response.content).decode("utf-8")

    inference_data = json.dumps({"inputs": [encoded_image]})

    or

    inference_df = pd.DataFrame(
    pd.Series([encoded_image], name="image_file")
    )
    split_dict = {"dataframe_split": inference_df.to_dict(orient="split")}
    split_json = json.dumps(split_dict)

    or

    records_dict = {"dataframe_records": inference_df.to_dict(orient="records")}
    records_json = json.dumps(records_dict)

    This utility will convert this JSON encoded, base64 encoded text back into bytes for
    input into the Image pipelines for inference.
    """

    def process_input_element(input_element):
        # Each record holds a single value (one column after DataFrame conversion).
        input_value = next(iter(input_element.values()))
        # Non-base64 strings must be a readable file path or a URI.
        if isinstance(input_value, str) and not self.is_base64_image(input_value):
            self._validate_str_input_uri_or_file(input_value)
        return input_value

    if isinstance(input_data, list) and all(
        isinstance(element, dict) for element in input_data
    ):
        # Use a list comprehension for readability
        # the elimination of empty collection declarations
        return [process_input_element(element) for element in input_data]
    elif isinstance(input_data, str) and not self.is_base64_image(input_data):
        self._validate_str_input_uri_or_file(input_data)
    # NOTE(review): base64 strings (and any other type) fall through to here and
    # are returned unchanged — the pipeline decodes them downstream; confirm.
    return input_data
def _convert_audio_input(
    self, data: AudioInput | list[dict[int, list[AudioInput]]]
) -> AudioInput | list[AudioInput]:
    """
    Convert the input data into the format that the Transformers pipeline expects.

    Args:
        data: The input data to be converted. This can be one of the following:
            1. A single input audio data (bytes, numpy array, or a path or URI to an audio file)
            2. List of dictionaries, derived from Pandas DataFrame with `orient="records"`.
               This is the outcome of the pyfunc signature validation for the audio input.
               E.g. [{[0]: <audio data>}, {[1]: <audio data>}]

    Returns:
        A single or list of audio data.
    """
    if not isinstance(data, list):
        # Already a bare audio payload; just decode it.
        return self._decode_audio(data)
    # Each record carries exactly one value; pull it out and decode.
    decoded = [self._decode_audio(next(iter(record.values()))) for record in data]
    # Signature validation converts a single audio data into a list (via Pandas Series).
    # We have to unwrap it back not to confuse with batch processing.
    return decoded if len(decoded) > 1 else decoded[0]
def _decode_audio(self, audio: AudioInput) -> AudioInput:
    """
    Decode the audio data if it is base64 encoded bytes, otherwise no-op.
    Raises for any type other than str, np.ndarray, or bytes.
    """
    if isinstance(audio, str):
        # A URI or filesystem path to the audio file to be processed; validate it
        # so bad references fail with a clear message.
        self._validate_str_input_uri_or_file(audio)
        return audio
    if isinstance(audio, np.ndarray):
        # Floating point time series of the audio; already consumable.
        return audio
    if isinstance(audio, bytes):
        # In model serving, raw audio in the 'inputs' key arrives still b64-encoded
        # (other payload shapes like 'dataframe_records'/'dataframe_split' are decoded
        # upstream), so decode here only when the bytes round-trip as base64.
        return base64.b64decode(audio) if self.is_base64_audio(audio) else audio
    raise MlflowException(
        "Invalid audio data. Must be either bytes, str, or np.ndarray.",
        error_code=INVALID_PARAMETER_VALUE,
    )
@staticmethod
def is_base64_audio(audio: bytes) -> bool:
    """Check whether ``audio`` is base64 encoded (decode/re-encode round-trips)."""
    try:
        decoded = base64.b64decode(audio)
    except binascii.Error:
        # Undecodable input cannot be base64.
        return False
    return base64.b64encode(decoded) == audio
@staticmethod
def _validate_str_input_uri_or_file(input_str):
"""
Validation of blob references to either audio or image files,
if a string is input to the ``predict``
method, perform validation of the string contents by checking for a valid uri or
filesystem reference instead of surfacing the cryptic stack trace that is otherwise raised
for an invalid uri input.
"""
def is_uri(s):
try:
result = urlparse(s)
return all([result.scheme, result.netloc])
except ValueError:
return False
valid_uri = os.path.isfile(input_str) or is_uri(input_str)
if not valid_uri:
if len(input_str) <= 20:
data_str = f"Received: {input_str}"
else:
data_str = f"Received (truncated): {input_str[:20]}..."
raise MlflowException(
"An invalid string input was provided. String inputs to "
"audio or image files must be either a file location or a uri."
f"audio files must be either a file location or a uri. {data_str}",
error_code=BAD_REQUEST,
)
def _format_prompt_template(self, input_data):
"""
Wraps the input data in the specified prompt template. If no template is
specified, or if the pipeline is an unsupported type, or if the input type
is not a string or list of strings, then the input data is returned unchanged.
"""
if not self.prompt_template:
return input_data
if self.pipeline.task not in _SUPPORTED_PROMPT_TEMPLATING_TASK_TYPES:
raise MlflowException(
f"_format_prompt_template called on an unexpected pipeline type. "
f"Expected one of: {_SUPPORTED_PROMPT_TEMPLATING_TASK_TYPES}. "
f"Received: {self.pipeline.task}"
)
if isinstance(input_data, str):
return self.prompt_template.format(prompt=input_data)
elif isinstance(input_data, list):
# if every item is a string, then apply formatting to every item
if all(isinstance(data, str) for data in input_data):
return [self.prompt_template.format(prompt=data) for data in input_data]
# throw for unsupported types
raise MlflowException.invalid_parameter_value(
"Prompt templating is only supported for data of type str or List[str]. "
f"Got {type(input_data)} instead."
)
@autologging_integration(FLAVOR_NAME)
def autolog(
    log_input_examples=False,
    log_model_signatures=False,
    log_models=False,
    log_datasets=False,
    disable=False,
    exclusive=False,
    disable_for_unsupported_versions=False,
    silent=False,
    extra_tags=None,
):
    """
    This autologging integration is solely used for disabling spurious autologging of irrelevant
    sub-models that are created during the training and evaluation of transformers-based models.
    Autologging functionality is not implemented fully for the transformers flavor.
    """
    # A list of other flavors whose base autologging config would be automatically logged due to
    # training a model that would otherwise create a run and be logged internally within the
    # transformers-supported trainer calls.
    DISABLED_ANCILLARY_FLAVOR_AUTOLOGGING = ["sklearn", "tensorflow", "pytorch"]

    def train(original, *args, **kwargs):
        # Run the wrapped trainer with the ancillary flavors' autologging suppressed.
        with disable_discrete_autologging(DISABLED_ANCILLARY_FLAVOR_AUTOLOGGING):
            return original(*args, **kwargs)

    # Patch SetFit trainers when setfit is installed (trainer class was renamed
    # from SetFitTrainer to Trainer in setfit >= 1.0).
    with contextlib.suppress(ImportError):
        import setfit

        safe_patch(
            FLAVOR_NAME,
            (setfit.SetFitTrainer if Version(setfit.__version__).major < 1 else setfit.Trainer),
            "train",
            functools.partial(train),
            manage_run=False,
        )

    # Patch the HuggingFace trainers when transformers is installed.
    with contextlib.suppress(ImportError):
        import transformers

        classes = [transformers.Trainer, transformers.Seq2SeqTrainer]
        methods = ["train"]
        for clazz in classes:
            for method in methods:
                safe_patch(FLAVOR_NAME, clazz, method, functools.partial(train), manage_run=False)
def _get_prompt_template(model_path):
    """Return the prompt template stored in the MLmodel metadata at ``model_path``,
    or None when no metadata / template is present."""
    if not os.path.exists(model_path):
        raise MlflowException(
            f'Could not find an "{MLMODEL_FILE_NAME}" configuration file at "{model_path}"',
            RESOURCE_DOES_NOT_EXIST,
        )
    model_conf = Model.load(model_path)
    metadata = model_conf.metadata
    return metadata.get(FlavorKey.PROMPT_TEMPLATE) if metadata else None
def _validate_prompt_template(prompt_template):
if prompt_template is None:
return
if not isinstance(prompt_template, str):
raise MlflowException(
f"Argument `prompt_template` must be a string, received {type(prompt_template)}",
INVALID_PARAMETER_VALUE,
)
format_args = [
tup[1] for tup in string.Formatter().parse(prompt_template) if tup[1] is not None
]
# expect there to only be one format arg, and for that arg to be "prompt"
if format_args != ["prompt"]:
raise MlflowException.invalid_parameter_value(
"Argument `prompt_template` must be a string with a single format arg, 'prompt'. "
"For example: 'Answer the following question in a friendly tone. Q: {prompt}. A:'\n"
f"Received {prompt_template}. "
)
|
_TransformersWrapper
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/aio/_connection.py
|
{
"start": 6107,
"end": 8528
}
|
class ____(AsyncConnectionPool):
    """Async connection pool for Azure Database for PostgreSQL connections."""

    def __init__(
        self,
        conninfo: str = "",
        *,
        # NOTE(review): a mutable/shared default instance is used here; confirm
        # AsyncConnectionInfo() is safe to share across pool constructions.
        azure_conn_info: AsyncConnectionInfo = AsyncConnectionInfo(),
        **kwargs,
    ):
        if isinstance(azure_conn_info.credentials, AsyncTokenCredential):
            # Entra ID (token) authentication: fetch an access token synchronously
            # now, and install a wrapper that refreshes it on reconnect failures.
            credential_provider = azure_conn_info.credentials
            coroutine = credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
            _logger.debug(
                "getting token from TokenCredential for the scope: %s",
                TOKEN_CREDENTIAL_SCOPE,
            )
            token = run_coroutine_in_sync(coroutine)
            _logger.info("getting username and password from token")
            username, password = get_username_password(token)
            _logger.debug("wrapping reconnect_failed function")
            # Preserve any caller-supplied reconnect_failed callback so it still runs.
            reconnect_failed: (
                Callable[[AsyncConnectionPool], Awaitable[None]] | None
            ) = kwargs.get("reconnect_failed")

            async def reconnect_failed_wrapper(pool: AsyncConnectionPool) -> None:
                # Chain to the original callback first, then refresh credentials:
                # the token may simply have expired.
                if reconnect_failed:
                    await reconnect_failed(pool)
                _logger.debug(
                    "getting token from TokenCredential for the scope: %s",
                    TOKEN_CREDENTIAL_SCOPE,
                )
                token = await credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
                _logger.info("getting username and password from token")
                username, password = get_username_password(token)
                # Update the pool's connection kwargs so new connections use the
                # freshly-minted credentials.
                pool.kwargs.update(
                    user=username,
                    password=password,
                )

            kwargs["reconnect_failed"] = reconnect_failed_wrapper
        else:
            # Static credentials (e.g. username/password) — no refresh machinery.
            username, password = get_username_password(azure_conn_info.credentials)
        # Merge the remaining Azure connection settings (minus credentials) into
        # the per-connection kwargs handed to psycopg.
        azure_conn_info_kwargs = azure_conn_info.model_dump(
            mode="json", exclude_none=True, exclude=set(["credentials"])
        )
        _logger.debug(
            "updating AsyncConnectionPool kwargs with those from: %s",
            azure_conn_info_kwargs,
        )
        kwargs_ = kwargs.get("kwargs", {})
        kwargs_.update(user=username, password=password, **azure_conn_info_kwargs)
        kwargs["kwargs"] = kwargs_
        super().__init__(conninfo, **kwargs)
|
AsyncAzurePGConnectionPool
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/ranges/test_indexing.py
|
{
"start": 5161,
"end": 5593
}
|
class ____:
def test_where_putmask_range_cast(self):
# GH#43240
idx = RangeIndex(0, 5, name="test")
mask = np.array([True, True, False, False, False])
result = idx.putmask(mask, 10)
expected = Index([10, 10, 2, 3, 4], dtype=np.int64, name="test")
tm.assert_index_equal(result, expected)
result = idx.where(~mask, 10)
tm.assert_index_equal(result, expected)
|
TestWhere
|
python
|
getsentry__sentry
|
src/sentry/issues/grouptype.py
|
{
"start": 17074,
"end": 17610
}
|
class ____(GroupType):
    """Issue group type for p95 endpoint latency regressions."""

    # Numeric identifier registered for this issue type.
    type_id = 1018
    slug = "performance_p95_endpoint_regression"
    description = "Endpoint Regression"
    category = GroupCategory.PERFORMANCE.value
    category_v2 = GroupCategory.METRIC.value
    # Automatic resolution and escalation detection are both disabled for this type.
    enable_auto_resolve = False
    enable_escalation_detection = False
    default_priority = PriorityLevel.MEDIUM
    released = True
    # Include the approximate regression start time in notifications.
    notification_config = NotificationConfig(context=[NotificationContextField.APPROX_START_TIME])
# experimental
@dataclass(frozen=True)
|
PerformanceP95EndpointRegressionGroupType
|
python
|
PrefectHQ__prefect
|
src/prefect/cli/transfer/_migratable_resources/base.py
|
{
"start": 957,
"end": 1844
}
|
class ____(Generic[T], abc.ABC):
    """Abstract interface for a resource that can be transferred between
    workspaces, pairing its source id with an optional destination id."""

    # Identifier of the resource in the source workspace.
    @property
    @abc.abstractmethod
    def source_id(self) -> uuid.UUID: ...

    # Identifier in the destination workspace; None until migrated.
    @property
    @abc.abstractmethod
    def destination_id(self) -> uuid.UUID | None: ...

    # Using this construct method because we may want to persist a serialized version of the object
    # to disk and reload it later to avoid using too much memory.
    @classmethod
    @abc.abstractmethod
    async def construct(cls, obj: T) -> "MigratableResource[T]": ...

    # Resources this one depends on; dependencies must migrate first.
    @abc.abstractmethod
    async def get_dependencies(self) -> "list[MigratableProtocol]": ...

    # Look up an already-constructed instance by id, if one exists.
    @classmethod
    @abc.abstractmethod
    async def get_instance(cls, id: uuid.UUID) -> "MigratableResource[T] | None": ...

    # Perform the actual migration of this resource.
    @abc.abstractmethod
    async def migrate(self) -> None: ...

    def __str__(self) -> str:
        return f"{type(self).__name__}(source_id={self.source_id})"
|
MigratableResource
|
python
|
huggingface__transformers
|
src/transformers/models/vaultgemma/modular_vaultgemma.py
|
{
"start": 9127,
"end": 10544
}
|
class ____(Gemma2DecoderLayer):
    """Gemma2 decoder layer variant that drops the post-attention and
    post-feedforward layernorms, applying only the pre-norms."""

    def __init__(self, **super_kwargs):
        super().__init__(**super_kwargs)
        # Remove the two post-norms inherited from Gemma2; forward() below only
        # uses input_layernorm and pre_feedforward_layernorm.
        del self.post_attention_layernorm
        del self.post_feedforward_layernorm

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        # Pre-norm attention sub-block with residual connection.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Pre-norm MLP sub-block with residual connection (no post-norms here).
        residual = hidden_states
        hidden_states = self.pre_feedforward_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
|
VaultGemmaDecoderLayer
|
python
|
getsentry__sentry
|
src/sentry/utils/snuba.py
|
{
"start": 14432,
"end": 14548
}
|
# Subclasses QueryExecutionError so callers can catch both with one handler.
class ____(QueryExecutionError):
    """
    Exception raised when a query is not valid.
    """
|
SchemaValidationError
|
python
|
ray-project__ray
|
python/ray/llm/_internal/batch/stages/tokenize_stage.py
|
{
"start": 1878,
"end": 2213
}
|
class ____(StatefulStage):
    """
    A stage that tokenizes the input.
    """

    # The UDF class that performs the tokenization for each batch.
    fn: Type[StatefulStageUDF] = TokenizeUDF

    def get_required_input_keys(self) -> Dict[str, str]:
        """The required input keys of the stage and their descriptions."""
        return {"prompt": "The text prompt (str) to tokenize."}
|
TokenizeStage
|
python
|
huggingface__transformers
|
src/transformers/models/rembert/modeling_rembert.py
|
{
"start": 39652,
"end": 44401
}
|
class ____(RemBertPreTrainedModel):
    """RemBERT with a multiple-choice head: scores each candidate choice and
    returns one logit per choice, softmax-able over the choice dimension."""

    def __init__(self, config):
        super().__init__(config)
        self.rembert = RemBertModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        # One score per choice; reshaped to (batch, num_choices) in forward().
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, MultipleChoiceModelOutput]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so each
        # candidate is encoded independently by the backbone.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.rembert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled [CLS]-style representation per flattened choice.
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Back to (batch, num_choices) for the cross-entropy over choices.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring
|
RemBertForMultipleChoice
|
python
|
keon__algorithms
|
algorithms/tree/deepest_left.py
|
{
"start": 246,
"end": 985
}
|
class ____:
    """Mutable result holder: tracks the deepest left-child node found so far."""

    def __init__(self):
        # Depth of the best (deepest) left node seen; 0 means none found yet.
        self.depth = 0
        # The deepest left node itself (capitalized attribute name kept for callers).
        self.Node = None
def find_deepest_left(root, is_left, depth, res):
    """Depth-first search that records, in ``res``, the deepest node reached
    through a left edge. ``res`` must expose mutable ``depth`` and ``Node``."""
    if not root:
        return
    # Record this node if it was reached via a left edge and is deeper than any
    # previously recorded left node.
    if is_left and depth > res.depth:
        res.depth, res.Node = depth, root
    # Recurse left first, then right, tagging each child with its edge direction.
    for child, child_is_left in ((root.left, True), (root.right, False)):
        find_deepest_left(child, child_is_left, depth + 1, res)
if __name__ == '__main__':
    # Build a small sample tree:
    #         1
    #        / \
    #       2   3
    #      / \   \
    #     4   5   6
    #              \
    #               7
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    root.right.right = TreeNode(6)
    root.right.right.right = TreeNode(7)

    res = DeepestLeft()
    find_deepest_left(root, True, 1, res)
    # Print the value of the deepest left child, if one was found.
    if res.Node:
        print(res.Node.val)
|
DeepestLeft
|
python
|
django__django
|
django/contrib/contenttypes/models.py
|
{
"start": 5011,
"end": 6844
}
|
class ____(models.Model):
    """Registry row mapping an (app_label, model) pair to a Django model class."""

    app_label = models.CharField(max_length=100)
    model = models.CharField(_("python model class name"), max_length=100)
    objects = ContentTypeManager()

    class Meta:
        verbose_name = _("content type")
        verbose_name_plural = _("content types")
        db_table = "django_content_type"
        # (app_label, model) uniquely identifies a model in the project.
        unique_together = [["app_label", "model"]]

    def __str__(self):
        return self.app_labeled_name

    @property
    def name(self):
        # Fall back to the raw stored model string when the model class can no
        # longer be imported (e.g. the app was removed).
        model = self.model_class()
        if not model:
            return self.model
        return str(model._meta.verbose_name)

    @property
    def app_labeled_name(self):
        # "<App verbose name> | <model verbose name>", or the raw model string
        # when the class is no longer importable.
        model = self.model_class()
        if not model:
            return self.model
        return "%s | %s" % (
            model._meta.app_config.verbose_name,
            model._meta.verbose_name,
        )

    def model_class(self):
        """Return the model class for this type of content."""
        try:
            return apps.get_model(self.app_label, self.model)
        except LookupError:
            return None

    def get_object_for_this_type(self, using=None, **kwargs):
        """
        Return an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get_object() model
        method. The ObjectNotExist exception, if thrown, will not be caught,
        so code that calls this method should catch it.
        """
        return self.model_class()._base_manager.using(using).get(**kwargs)

    def get_all_objects_for_this_type(self, **kwargs):
        """
        Return all objects of this type for the keyword arguments given.
        """
        return self.model_class()._base_manager.filter(**kwargs)

    def natural_key(self):
        # Natural key used by serialization instead of the auto primary key.
        return (self.app_label, self.model)
|
ContentType
|
python
|
django__django
|
tests/template_tests/test_parser.py
|
{
"start": 428,
"end": 8573
}
|
class ____(SimpleTestCase):
    """Tests for the template engine's parsing primitives: Token splitting,
    reprs, FilterExpression parsing/resolution, and Variable handling."""

    def test_token_smart_split(self):
        """
        #7027 -- _() syntax should work with spaces
        """
        token = Token(
            TokenType.BLOCK, 'sometag _("Page not found") value|yesno:_("yes,no")'
        )
        split = token.split_contents()
        self.assertEqual(
            split, ["sometag", '_("Page not found")', 'value|yesno:_("yes,no")']
        )

    def test_repr(self):
        # Reprs of the four main parser-layer objects are stable and truncated.
        token = Token(TokenType.BLOCK, "some text")
        self.assertEqual(repr(token), '<Block token: "some text...">')
        parser = Parser([token], builtins=[filter_library])
        self.assertEqual(
            repr(parser),
            '<Parser tokens=[<Block token: "some text...">]>',
        )
        filter_expression = FilterExpression("news|upper", parser)
        self.assertEqual(repr(filter_expression), "<FilterExpression 'news|upper'>")
        lexer = Lexer("{% for i in 1 %}{{ a }}\n{% endfor %}")
        self.assertEqual(
            repr(lexer),
            '<Lexer template_string="{% for i in 1 %}{{ a...", verbatim=False>',
        )

    def test_filter_parsing(self):
        c = {"article": {"section": "News"}}
        p = Parser("", builtins=[filter_library])

        def fe_test(s, val):
            # Helper: parse expression ``s`` and assert it resolves to ``val``.
            self.assertEqual(FilterExpression(s, p).resolve(c), val)

        fe_test("article.section", "News")
        fe_test("article.section|upper", "NEWS")
        fe_test('"News"', "News")
        fe_test("'News'", "News")
        fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
        fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
        fe_test(r"'Some \'Bad\' News'", "Some 'Bad' News")

        fe = FilterExpression(r'"Some \"Good\" News"', p)
        self.assertEqual(fe.filters, [])
        self.assertEqual(fe.var, 'Some "Good" News')

        # Filtered variables should reject access of attributes beginning with
        # underscores.
        msg = (
            "Variables and attributes may not begin with underscores: 'article._hidden'"
        )
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            FilterExpression("article._hidden|upper", p)

    def test_cannot_parse_characters(self):
        p = Parser("", builtins=[filter_library])
        for filter_expression, characters in [
            ('<>|default:"Default"|upper', '|<>||default:"Default"|upper'),
            ("test|<>|upper", "test||<>||upper"),
        ]:
            with self.subTest(filter_expression=filter_expression):
                with self.assertRaisesMessage(
                    TemplateSyntaxError,
                    f"Could not parse some characters: {characters}",
                ):
                    FilterExpression(filter_expression, p)

    def test_cannot_find_variable(self):
        # An expression that starts with a pipe has no variable to resolve.
        p = Parser("", builtins=[filter_library])
        with self.assertRaisesMessage(
            TemplateSyntaxError,
            'Could not find variable at start of |default:"Default"',
        ):
            FilterExpression('|default:"Default"', p)

    def test_variable_parsing(self):
        c = {"article": {"section": "News"}}
        self.assertEqual(Variable("article.section").resolve(c), "News")
        self.assertEqual(Variable('"News"').resolve(c), "News")
        self.assertEqual(Variable("'News'").resolve(c), "News")

        # Translated strings are handled correctly.
        self.assertEqual(Variable("_(article.section)").resolve(c), "News")
        self.assertEqual(Variable('_("Good News")').resolve(c), "Good News")
        self.assertEqual(Variable("_('Better News')").resolve(c), "Better News")

        # Escaped quotes work correctly as well.
        self.assertEqual(
            Variable(r'"Some \"Good\" News"').resolve(c), 'Some "Good" News'
        )
        self.assertEqual(
            Variable(r"'Some \'Better\' News'").resolve(c), "Some 'Better' News"
        )

        # Variables should reject access of attributes and variables beginning
        # with underscores.
        for name in ["article._hidden", "_article"]:
            msg = f"Variables and attributes may not begin with underscores: '{name}'"
            with self.assertRaisesMessage(TemplateSyntaxError, msg):
                Variable(name)

        # Variables should raise on non string type
        with self.assertRaisesMessage(
            TypeError, "Variable must be a string or number, got <class 'dict'>"
        ):
            Variable({})

        # Variables should raise when invalid characters in name.
        for c in ["+", "-"]:
            with self.subTest(invalid_character=c):
                variable_name = f"variable{c}name"
                with self.assertRaisesMessage(
                    TemplateSyntaxError,
                    f"Invalid character ('{c}') in variable name: '{variable_name}'",
                ):
                    Variable(variable_name)

    def test_filter_args_count(self):
        parser = Parser("")
        register = Library()

        # Register filters covering every arity combination under test.
        @register.filter
        def no_arguments(value):
            pass

        @register.filter
        def one_argument(value, arg):
            pass

        @register.filter
        def one_opt_argument(value, arg=False):
            pass

        @register.filter
        def two_arguments(value, arg, arg2):
            pass

        @register.filter
        def two_one_opt_arg(value, arg, arg2=False):
            pass

        parser.add_library(register)

        # Wrong argument counts must be rejected at parse time.
        for expr in (
            '1|no_arguments:"1"',
            "1|two_arguments",
            '1|two_arguments:"1"',
            "1|two_one_opt_arg",
        ):
            with self.assertRaises(TemplateSyntaxError):
                FilterExpression(expr, parser)
        for expr in (
            # Correct number of arguments
            "1|no_arguments",
            '1|one_argument:"1"',
            # One optional
            "1|one_opt_argument",
            '1|one_opt_argument:"1"',
            # Not supplying all
            '1|two_one_opt_arg:"1"',
        ):
            FilterExpression(expr, parser)

    def test_filter_numeric_argument_parsing(self):
        p = Parser("", builtins=[filter_library])
        # Values that resolve to a numeric literal.
        cases = {
            "5": 5,
            "-5": -5,
            "5.2": 5.2,
            ".4": 0.4,
            "5.2e3": 5200.0,  # 5.2 × 10³ = 5200.0.
            "5.2E3": 5200.0,  # Case-insensitive.
            "5.2e-3": 0.0052,  # Negative exponent.
            "-1.5E4": -15000.0,
            "+3.0e2": 300.0,
            ".5e2": 50.0,  # 0.5 × 10² = 50.0
        }
        for num, expected in cases.items():
            with self.subTest(num=num):
                self.assertEqual(FilterExpression(num, p).resolve({}), expected)
                self.assertEqual(
                    FilterExpression(f"0|default:{num}", p).resolve({}), expected
                )

        # Values that are interpreted as names of variables that do not exist.
        invalid_numbers = [
            "abc123",
            "123abc",
            "foo",
            "error",
            "1e",
            "e400",
            "1e.2",
            "1e2.",
            "1e2.0",
            "1e2a",
            "1e2e3",
        ]
        for num in invalid_numbers:
            with self.subTest(num=num):
                self.assertIsNone(
                    FilterExpression(num, p).resolve({}, ignore_failures=True)
                )
                with self.assertRaises(VariableDoesNotExist):
                    FilterExpression(f"0|default:{num}", p).resolve({})

        # Values that are interpreted as an invalid variable name.
        invalid_numbers_and_var_names = [
            "1e-",
            "1e-a",
            "1+1",
            "1-1",
        ]
        for num in invalid_numbers_and_var_names:
            with self.subTest(num=num):
                with self.assertRaises(TemplateSyntaxError):
                    FilterExpression(num, p).resolve({})
                with self.assertRaises(TemplateSyntaxError):
                    FilterExpression(f"0|default:{num}", p).resolve({})
|
ParserTests
|
python
|
doocs__leetcode
|
solution/0200-0299/0209.Minimum Size Subarray Sum/Solution.py
|
{
"start": 0,
"end": 347
}
|
class ____:
def minSubArrayLen(self, target: int, nums: List[int]) -> int:
n = len(nums)
s = list(accumulate(nums, initial=0))
ans = n + 1
for i, x in enumerate(s):
j = bisect_left(s, x + target)
if j <= n:
ans = min(ans, j - i)
return ans if ans <= n else 0
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/add-to-array-form-of-integer.py
|
{
"start": 36,
"end": 526
}
|
class ____(object):
def addToArrayForm(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: List[int]
"""
A.reverse()
carry, i = K, 0
A[i] += carry
carry, A[i] = divmod(A[i], 10)
while carry:
i += 1
if i < len(A):
A[i] += carry
else:
A.append(carry)
carry, A[i] = divmod(A[i], 10)
A.reverse()
return A
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/migration_helpers/test_migrate_alert_rule.py
|
{
"start": 47124,
"end": 49553
}
|
class ____(BaseMetricAlertMigrationTest):
    """Tests for dual-deleting the workflow-engine rows that mirror an
    AlertRuleTriggerAction: the Action, its DataConditionGroupAction link,
    and the ActionAlertRuleTriggerAction lookup row.
    """
    def setUp(self) -> None:
        # Build a dual-written metric alert: the legacy alert rule / trigger /
        # trigger-action objects plus their migrated workflow-engine
        # counterparts (created in dependency order).
        self.metric_alert = self.create_alert_rule()
        self.alert_rule_trigger = self.create_alert_rule_trigger(
            alert_rule=self.metric_alert, label="critical"
        )
        self.alert_rule_trigger_action = self.create_alert_rule_trigger_action(
            alert_rule_trigger=self.alert_rule_trigger
        )
        self.create_migrated_metric_alert_objects(self.metric_alert)
        self.create_migrated_metric_alert_rule_trigger_objects(
            self.alert_rule_trigger, DetectorPriorityLevel.HIGH, Condition.GREATER
        )
        # Keep references to the migrated rows so tests can assert on their
        # deletion by id.
        self.action, self.data_condition_group_action, self.aarta = (
            self.create_migrated_metric_alert_rule_action_objects(self.alert_rule_trigger_action)
        )
    def test_dual_delete_migrated_alert_rule_trigger_action(self) -> None:
        # Deleting a migrated trigger action must remove all three mirrored rows.
        dual_delete_migrated_alert_rule_trigger_action(self.alert_rule_trigger_action)
        assert not Action.objects.filter(id=self.action.id).exists()
        assert not ActionAlertRuleTriggerAction.objects.filter(id=self.aarta.id).exists()
        assert not DataConditionGroupAction.objects.filter(
            id=self.data_condition_group_action.id
        ).exists()
    @mock.patch("sentry.workflow_engine.migration_helpers.alert_rule.logger")
    def test_dual_delete_unmigrated_alert_rule_trigger_action(
        self, mock_logger: mock.MagicMock
    ) -> None:
        """
        Test that nothing weird happens if we try to dual delete a trigger action whose alert
        rule was never dual written.
        """
        unmigrated_trigger_action = self.create_alert_rule_trigger_action()
        metric_alert = unmigrated_trigger_action.alert_rule_trigger.alert_rule
        # Expected to be a no-op that logs and returns early, not raise.
        dual_delete_migrated_alert_rule_trigger_action(unmigrated_trigger_action)
        mock_logger.info.assert_called_with(
            "alert rule was not dual written, returning early",
            extra={"alert_rule": metric_alert},
        )
    def test_dual_delete_action_missing_aarta(self) -> None:
        """
        Test that we raise an exception if the aarta entry for a migrated trigger action is missing
        """
        self.aarta.delete()
        with pytest.raises(ActionAlertRuleTriggerAction.DoesNotExist):
            dual_delete_migrated_alert_rule_trigger_action(self.alert_rule_trigger_action)
|
DualDeleteAlertRuleTriggerActionTest
|
python
|
walkccc__LeetCode
|
solutions/817. Linked List Components/817.py
|
{
"start": 0,
"end": 300
}
|
class ____:
    def numComponents(self, head: ListNode | None, nums: list[int]) -> int:
        """Count maximal runs of consecutive list nodes whose values are in nums."""
        allowed = set(nums)  # O(1) membership tests
        components = 0
        node = head
        while node:
            nxt = node.next
            # A component ends at an allowed node whose successor is missing
            # or not allowed.
            if node.val in allowed and (nxt is None or nxt.val not in allowed):
                components += 1
            node = nxt
        return components
|
Solution
|
python
|
PyCQA__pylint
|
tests/checkers/unittest_unicode/__init__.py
|
{
"start": 1803,
"end": 2274
}
|
class ____:
"""Simple Faker representing a Module node.
Astroid crashes in a number of cases if we want to lint unsupported encodings.
So, this is used to test the behaviour of the encoding checker.
This shall ensure that our checks keep working once Python supports UTF16/32.
"""
file: Path
def __init__(self, content: bytes):
self.content = io.BytesIO(content)
def stream(self) -> io.BytesIO:
return self.content
|
FakeNode
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/mariadb.py
|
{
"start": 2258,
"end": 2479
}
|
class ____(MySQLTypeCompiler):
    """Extends the MySQL type compiler with MariaDB-specific column types."""

    def visit_INET4(self, type_: INET4, **kwargs: Any) -> str:
        # Render the MariaDB-native IPv4 address column type as its DDL keyword.
        return "INET4"

    def visit_INET6(self, type_: INET6, **kwargs: Any) -> str:
        # Render the MariaDB-native IPv6 address column type as its DDL keyword.
        return "INET6"
|
MariaDBTypeCompiler
|
python
|
keras-team__keras
|
keras/src/losses/losses.py
|
{
"start": 15516,
"end": 17173
}
|
class ____(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = square(maximum(1 - y_true * y_pred, 0))
```
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self, reduction="sum_over_batch_size", name="squared_hinge", dtype=None
):
super().__init__(
squared_hinge, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.CategoricalHinge")
|
SquaredHinge
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.