language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
getsentry__sentry
|
src/sentry/integrations/discord/integration.py
|
{
"start": 3071,
"end": 4958
}
|
class ____(IntegrationInstallation, IntegrationNotificationClient):
    """Discord installation: delivers notifications and cleans up the guild bot on uninstall."""

    def get_client(self) -> DiscordClient:
        """Return a fresh Discord API client."""
        return DiscordClient()

    def send_notification(
        self, target: IntegrationNotificationTarget, payload: DiscordRenderable
    ) -> None:
        """Send *payload* to the channel identified by *target*, translating API errors."""
        client = self.get_client()
        try:
            client.send_message(channel_id=target.resource_id, message=payload)
        except ApiError as api_error:
            translate_discord_api_error(api_error)

    def uninstall(self) -> None:
        """Remove the bot from the Discord server if no other org still uses it."""
        from sentry.integrations.services.integration import integration_service

        org_integrations = integration_service.get_organization_integrations(
            integration_id=self.model.id,
            providers=[IntegrationProviderSlug.DISCORD.value],
        )
        # Installations already being deleted don't count as users of the guild.
        deleting = (ObjectStatus.PENDING_DELETION, ObjectStatus.DELETION_IN_PROGRESS)
        still_active = [oi for oi in org_integrations if oi.status not in deleting]
        if len(still_active) > 1:
            # Some other org still relies on this Discord server; keep the bot.
            return
        client = self.get_client()
        try:
            client.leave_guild(str(self.model.external_id))
        except ApiError as api_error:
            if api_error.code != 404:
                # The bot failed to leave the guild for some reason other than
                # already being gone, but this doesn't need to interrupt the
                # uninstall. Just means the bot will persist on the server
                # until removed manually.
                logger.error(
                    "discord.uninstall.failed_to_leave_guild",
                    extra={"discord_guild_id": self.model.external_id, "status": api_error.code},
                )
            # On 404 the bot has already been removed from the guild.
            return
|
DiscordIntegration
|
python
|
numpy__numpy
|
numpy/_core/tests/test_defchararray.py
|
{
"start": 28803,
"end": 30683
}
|
class ____:
    """Smoke tests for np.char routines invoked with scalar or small inputs."""

    def test_mod(self):
        data = np.array([[' abc ', ''],
                         ['12345', 'MixedCase'],
                         ['123 \t 345 \0 ', 'UPPER']], dtype='S')
        expected = [[b'123 abc ', b'123'],
                    [b'12312345', b'123MixedCase'],
                    [b'123123 \t 345 \0 ', b'123UPPER']]
        assert_array_equal(np.char.mod(b"123%s", data), expected)

    def test_decode(self):
        # cp037 is an EBCDIC codec; the bytes decode to plain ASCII letters.
        encoded = b'\x81\xc1\x81\xc1\x81\xc1'
        assert_equal(np.char.decode(encoded, encoding='cp037'), 'aAaAaA')

    def test_encode(self):
        text = 'aAaAaA'
        assert_equal(np.char.encode(text, encoding='cp037'),
                     b'\x81\xc1\x81\xc1\x81\xc1')

    def test_expandtabs(self):
        sample = "\tone level of indentation\n\t\ttwo levels of indentation"
        expected = " one level of indentation\n two levels of indentation"
        assert_equal(np.char.expandtabs(sample, tabsize=2), expected)

    def test_join(self):
        separators = np.array(['-', '_'])
        expected = ['h-e-l-l-o', 'h_e_l_l_o']
        assert_array_equal(np.char.join(separators, 'hello'), expected)

    def test_partition(self):
        assert_equal(np.char.partition('This string', ' '),
                     ['This', ' ', 'string'])

    def test_rpartition(self):
        assert_equal(np.char.rpartition('This string here', ' '),
                     ['This string', ' ', 'here'])

    def test_replace(self):
        assert_equal(np.char.replace('Python is good', 'good', 'great'),
                     'Python is great')
def test_empty_indexing():
    """Regression test for ticket 1948."""
    # Indexing a chararray with an empty list/array must produce an empty
    # chararray, not a one-element array containing an empty string.
    arr = np.char.chararray((4,))
    selected = arr[[]]
    assert_(selected.size == 0)
|
TestMethodsScalarValues
|
python
|
huggingface__transformers
|
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
|
{
"start": 2667,
"end": 8342
}
|
class ____(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        # All-zeros buffer used as the default token_type_ids in forward() so the
        # model can be traced without the caller passing token_type_ids.
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )
        # The padding token id doubles as the position-id offset: positions
        # begin at padding_idx + 1 (see create_position_ids_from_input_ids).
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        """
        Sum word, token-type and position embeddings, then apply dropout.

        Any of ``token_type_ids``/``position_ids`` not supplied are derived:
        position ids from ``input_ids`` (padding-aware) or sequentially from
        ``inputs_embeds``; token type ids from the registered all-zeros buffer.
        Exactly one of ``input_ids`` / ``inputs_embeds`` must be provided.
        """
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(
                    input_ids, self.padding_idx, past_key_values_length
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        batch_size, seq_length = input_shape
        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
                buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
                # gather picks the buffer entries at the (possibly non-contiguous) positions.
                buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
                token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = embeddings + position_embeddings
        embeddings = self.dropout(embeddings)
        return embeddings
    @staticmethod
    def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor of shape (..., seq_len, dim)
            padding_idx: int, offset before the first real position id
        Returns: torch.Tensor of shape (1, seq_len) expanded to the batch shape
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        # Sequential ids starting right after the padding index.
        position_ids = torch.arange(
            padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)
    @staticmethod
    def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
        are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            input_ids: torch.Tensor of token ids
            padding_idx: int, the padding token id
            past_key_values_length: int, offset added for cached decoding
        Returns: torch.Tensor of the same shape as input_ids
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        # cumsum numbers the non-pad tokens 1..n; multiplying by mask zeroes the pads again.
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: Optional[float] = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """
    Plain (non-fused) scaled dot-product attention.

    Returns a tuple of (attention output with heads moved back to dim 1,
    attention probabilities after dropout).
    """
    if scaling is None:
        # Default scale: 1/sqrt(head_dim).
        scaling = query.size(-1) ** -0.5

    # Raw similarity scores between every query and key position.
    scores = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Crop the mask to the actual key length before adding it.
        scores = scores + attention_mask[:, :, :, : key.shape[-2]]

    probs = nn.functional.softmax(scores, dim=-1)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    context = torch.matmul(probs, value)
    context = context.transpose(1, 2).contiguous()
    return context, probs
|
XLMRobertaXLEmbeddings
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/details.py
|
{
"start": 1259,
"end": 6341
}
|
class ____(BlockProcessor):
    """Details block processor."""

    START = re.compile(
        r'(?:^|\n)\?{3}(\+)? ?(?:([\w\-]+(?: +[\w\-]+)*?)?(?: +"(.*?)")|([\w\-]+(?: +[\w\-]+)*?)) *(?:\n|$)'
    )
    COMPRESS_SPACES = re.compile(r' {2,}')

    def __init__(self, parser):
        """Initialization."""
        super().__init__(parser)
        self.current_sibling = None
        # Fix: this attribute was previously misspelled `content_indention`,
        # so the attribute actually read/written elsewhere (`content_indent`,
        # see parse_content) was never initialized by the constructor.
        self.content_indent = 0

    def detab_by_length(self, text, length):
        """Remove a tab from the front of each line of the given text."""
        newtext = []
        lines = text.split('\n')
        for line in lines:
            if line.startswith(' ' * length):
                newtext.append(line[length:])
            elif not line.strip():
                newtext.append('')  # pragma: no cover
            else:
                break
        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])

    def parse_content(self, parent, block):
        """
        Get sibling details.

        Retrieve the appropriate sibling element. This can get tricky when
        dealing with lists.
        """
        old_block = block
        non_details = ''
        # We already acquired the block via test
        if self.current_sibling is not None:
            sibling = self.current_sibling
            block, non_details = self.detab_by_length(block, self.content_indent)
            self.current_sibling = None
            self.content_indent = 0
            return sibling, block, non_details
        sibling = self.lastChild(parent)
        if sibling is None or sibling.tag.lower() != 'details':
            sibling = None
        else:
            # If the last child is a list and the content is indented sufficient
            # to be under it, then the content's sibling is in the list.
            last_child = self.lastChild(sibling)
            indent = 0
            while last_child is not None:
                if (
                    sibling is not None and block.startswith(' ' * self.tab_length * 2) and
                    last_child is not None and last_child.tag in ('ul', 'ol', 'dl')
                ):
                    # The expectation is that we'll find an `<li>`.
                    # We should get its last child as well.
                    sibling = self.lastChild(last_child)
                    last_child = self.lastChild(sibling) if sibling is not None else None
                    # Context has been lost at this point, so we must adjust the
                    # text's indentation level so it will be evaluated correctly
                    # under the list.
                    block = block[self.tab_length:]
                    indent += self.tab_length
                else:
                    last_child = None
            if not block.startswith(' ' * self.tab_length):
                sibling = None
        if sibling is not None:
            indent += self.tab_length
            block, non_details = self.detab_by_length(old_block, indent)
            # Remember the match so run() doesn't have to repeat the search.
            self.current_sibling = sibling
            self.content_indent = indent
        return sibling, block, non_details

    def test(self, parent, block):
        """Test block."""
        if self.START.search(block):
            return True
        else:
            return self.parse_content(parent, block)[0] is not None

    def run(self, parent, blocks):
        """Convert to details/summary block."""
        block = blocks.pop(0)
        m = self.START.search(block)
        if m:
            # remove the first line
            if m.start() > 0:
                self.parser.parseBlocks(parent, [block[:m.start()]])
            block = block[m.end():]
            block, non_details = self.detab(block)
        else:
            sibling, block, non_details = self.parse_content(parent, block)
        if m:
            state = m.group(1)
            is_open = state is not None
            if m.group(4):
                # Classes only (no quoted title): first class doubles as the title.
                class_name = self.COMPRESS_SPACES.sub(' ', m.group(4).lower())
                title = class_name.split(' ')[0].capitalize()
            else:
                classes = m.group(2)
                class_name = '' if classes is None else self.COMPRESS_SPACES.sub(' ', classes.lower())
                title = m.group(3)
            div = etree.SubElement(parent, 'details', ({'open': 'open'} if is_open else {}))
            if class_name:
                div.set('class', class_name)
            summary = etree.SubElement(div, 'summary')
            summary.text = title
        else:
            # Sibling is a list item; its bare text needs to be wrapped in a <p>.
            if sibling.tag in ('li', 'dd') and sibling.text:
                text = sibling.text
                sibling.text = ''
                p = etree.SubElement(sibling, 'p')
                p.text = text
            div = sibling
        self.parser.parseChunk(div, block)
        if non_details:
            # Insert the non-details content back into blocks
            blocks.insert(0, non_details)
|
DetailsProcessor
|
python
|
django-haystack__django-haystack
|
test_haystack/test_fields.py
|
{
"start": 377,
"end": 4842
}
|
class ____(TestCase):
    """Tests for SearchField iterable coercion and attribute-lookup resolution."""

    def test_get_iterable_objects_with_none(self):
        # None resolves to an empty list, not [None].
        self.assertEqual([], SearchField.get_iterable_objects(None))

    def test_get_iterable_objects_with_single_non_iterable_object(self):
        item = object()
        self.assertEqual([item], SearchField.get_iterable_objects(item))

    def test_get_iterable_objects_with_list_stays_the_same(self):
        # Lists must be passed through by identity, not copied.
        items = [object(), object()]
        self.assertIs(items, SearchField.get_iterable_objects(items))

    def test_get_iterable_objects_with_django_manytomany_rel(self):
        owner = ManyToManyLeftSideModel.objects.create()
        first_related = ManyToManyRightSideModel.objects.create(name="Right side 1")
        second_related = ManyToManyRightSideModel.objects.create()
        owner.related_models.add(first_related)
        owner.related_models.add(second_related)
        resolved = SearchField.get_iterable_objects(owner.related_models)
        self.assertIn(first_related, resolved)
        self.assertIn(second_related, resolved)

    def test_get_iterable_objects_with_django_onetomany_rel(self):
        owner = OneToManyLeftSideModel.objects.create()
        first_related = OneToManyRightSideModel.objects.create(left_side=owner)
        second_related = OneToManyRightSideModel.objects.create(left_side=owner)
        resolved = SearchField.get_iterable_objects(owner.right_side)
        self.assertIn(first_related, resolved)
        self.assertIn(second_related, resolved)

    def test_resolve_attributes_lookup_with_field_that_points_to_none(self):
        # A null attribute on a non-nullable field must raise.
        inner = Mock(spec=["none_field"], none_field=None)
        outer = Mock(spec=["related"], related=[inner])
        field = SearchField(null=False)
        with self.assertRaises(SearchFieldError):
            field.resolve_attributes_lookup([outer], ["related", "none_field"])

    def test_resolve_attributes_lookup_with_field_that_points_to_none_but_is_allowed_to_be_null(
        self,
    ):
        inner = Mock(spec=["none_field"], none_field=None)
        outer = Mock(spec=["related"], related=[inner])
        field = SearchField(null=True)
        resolved = field.resolve_attributes_lookup([outer], ["related", "none_field"])
        self.assertEqual([None], resolved)

    def test_resolve_attributes_lookup_with_field_that_points_to_none_but_has_default(
        self,
    ):
        inner = Mock(spec=["none_field"], none_field=None)
        outer = Mock(spec=["related"], related=[inner])
        field = SearchField(default="Default value")
        resolved = field.resolve_attributes_lookup([outer], ["related", "none_field"])
        self.assertEqual(["Default value"], resolved)

    def test_resolve_attributes_lookup_with_deep_relationship(self):
        leaf = Mock(spec=["value"], value=1)
        middle = Mock(spec=["related"], related=[leaf, leaf])
        root = Mock(spec=["related"], related=[middle])
        field = SearchField()
        resolved = field.resolve_attributes_lookup([root], ["related", "related", "value"])
        self.assertEqual([1, 1], resolved)

    def test_resolve_attributes_lookup_with_deep_relationship_through_m2m(self):
        # Layout under test:
        #   obj.related_m2m -> [middle1, middle2, middle3]
        #   middleN.related.value == N
        leaf_values = [1, 2, 3]
        leaves = [Mock(spec=["value"], value=v) for v in leaf_values]
        middles = [Mock(spec=["related"], related=leaf) for leaf in leaves]
        m2m_rel = Mock(
            spec=["__iter__"],
            __iter__=lambda self: iter(middles),
        )
        root = Mock(spec=["related_m2m"], related_m2m=m2m_rel)
        field = SearchField()
        resolved = field.resolve_attributes_lookup(
            [root], ["related_m2m", "related", "value"]
        )
        self.assertEqual(leaf_values, resolved)

    def test_prepare_with_null_django_onetomany_rel(self):
        owner = OneToManyLeftSideModel.objects.create()
        field = SearchField(model_attr="right_side__pk", null=True)
        self.assertIsNone(field.prepare(owner))
|
SearchFieldTestCase
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/metrics/column_map_metrics/column_values_json_parseable.py
|
{
"start": 414,
"end": 1196
}
|
class ____(ColumnMapMetricProvider):
    """Map metric: flags which column values parse as JSON."""

    condition_metric_name = "column_values.json_parseable"

    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        def parses_as_json(candidate):
            # Any failure (decode error, wrong type, ...) means "not parseable".
            try:
                json.loads(candidate)
            except Exception:
                return False
            return True

        return column.map(parses_as_json)

    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, **kwargs):
        def parses_as_json(candidate):
            # Mirror of the pandas predicate, evaluated per-row inside a UDF.
            try:
                json.loads(candidate)
            except Exception:
                return False
            return True

        return F.udf(parses_as_json, pyspark.types.BooleanType())(column)
|
ColumnValuesJsonParseable
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/cloud_memorystore.py
|
{
"start": 1740,
"end": 21736
}
|
class ____(GoogleBaseHook):
"""
Hook for Google Cloud Memorystore APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )
        # Client is created lazily on first use; see get_conn().
        self._client: CloudRedisClient | None = None
def get_conn(self) -> CloudRedisClient:
"""Retrieve client library object that allow access to Cloud Memorystore service."""
if not self._client:
self._client = CloudRedisClient(credentials=self.get_credentials())
return self._client
@staticmethod
def _append_label(instance: Instance, key: str, val: str) -> Instance:
"""
Append labels to provided Instance type.
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param instance: The proto to append resource_label airflow
version to
:param key: The key label
:param val:
:return: The cluster proto updated with new label
"""
val = val.replace(".", "-").replace("+", "-")
instance.labels.update({key: val})
return instance
    @GoogleBaseHook.fallback_to_default_project_id
    def create_instance(
        self,
        location: str,
        instance_id: str,
        instance: dict | Instance,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Create a Redis instance based on the specified tier and memory size.

        By default, the instance is accessible from the project's `default network
        <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :param instance_id: Required. The logical name of the Redis instance in the customer project with the
            following restrictions:
            - Must contain only lowercase letters, numbers, and hyphens.
            - Must start with a letter.
            - Must be between 1-40 characters.
            - Must end with a number or a letter.
            - Must be unique within the customer project / location
        :param instance: Required. A Redis [Instance] resource
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.redis_v1.types.Instance`
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        if isinstance(instance, dict):
            instance = Instance(**instance)
        elif not isinstance(instance, Instance):
            raise AirflowException("instance is not instance of Instance type or python dict")
        parent = f"projects/{project_id}/locations/{location}"
        instance_name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"
        # Idempotency: if the instance already exists, return it as-is.
        try:
            self.log.info("Fetching instance: %s", instance_name)
            # Note: `instance` is rebound to the fetched proto only on success.
            instance = client.get_instance(
                request={"name": instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
            )
            self.log.info("Instance exists. Skipping creation.")
            return instance
        except NotFound:
            self.log.info("Instance not exists.")
        # Tag the new instance with the creating Airflow version (sanitized in _append_label).
        self._append_label(instance, "airflow-version", "v" + version.version)
        result = client.create_instance(
            request={"parent": parent, "instance_id": instance_id, "instance": instance},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # create_instance returns a long-running operation; block until done.
        result.result()
        self.log.info("Instance created.")
        # Re-fetch so the caller gets the server-side representation.
        return client.get_instance(
            request={"name": instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def delete_instance(
        self,
        location: str,
        instance: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Delete a specific Redis instance. Instance stops serving and data is deleted.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :param instance: The logical name of the Redis instance in the customer project.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        name = f"projects/{project_id}/locations/{location}/instances/{instance}"
        # Fetch first; the `instance` parameter is rebound to the fetched proto.
        self.log.info("Fetching Instance: %s", name)
        instance = client.get_instance(
            request={"name": name},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # NOTE(review): client.get_instance raises NotFound for a missing
        # instance rather than returning a falsy value, so this guard looks
        # unreachable — confirm against the google-cloud-redis client behavior.
        if not instance:
            return
        self.log.info("Deleting Instance: %s", name)
        result = client.delete_instance(
            request={"name": name},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Long-running operation: block until the deletion completes.
        result.result()
        self.log.info("Instance deleted: %s", name)
    @GoogleBaseHook.fallback_to_default_project_id
    def export_instance(
        self,
        location: str,
        instance: str,
        output_config: dict | OutputConfig,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Export Redis instance data into a Redis RDB format file in Cloud Storage.

        Redis will continue serving during this operation.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :param instance: The logical name of the Redis instance in the customer project.
        :param output_config: Required. Specify data to be exported.
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.redis_v1.types.OutputConfig`
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        name = f"projects/{project_id}/locations/{location}/instances/{instance}"
        self.log.info("Exporting Instance: %s", name)
        result = client.export_instance(
            request={"name": name, "output_config": output_config},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Long-running operation: block until the export finishes.
        result.result()
        self.log.info("Instance exported: %s", name)
    @GoogleBaseHook.fallback_to_default_project_id
    def failover_instance(
        self,
        location: str,
        instance: str,
        data_protection_mode: FailoverInstanceRequest.DataProtectionMode,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Failover of the primary node to current replica node.

        Initiates a failover of the primary node to current replica node for a specific STANDARD tier Cloud
        Memorystore for Redis instance.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :param instance: The logical name of the Redis instance in the customer project.
        :param data_protection_mode: Optional. Available data protection modes that the user can choose. If
            it's unspecified, data protection mode will be LIMITED_DATA_LOSS by default. See
            :class:`~google.cloud.redis_v1.types.FailoverInstanceRequest.DataProtectionMode`.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        name = f"projects/{project_id}/locations/{location}/instances/{instance}"
        self.log.info("Failovering Instance: %s", name)
        result = client.failover_instance(
            request={"name": name, "data_protection_mode": data_protection_mode},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Long-running operation: block until the failover completes.
        result.result()
        self.log.info("Instance failovered: %s", name)
    @GoogleBaseHook.fallback_to_default_project_id
    def get_instance(
        self,
        location: str,
        instance: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Get the details of a specific Redis instance.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :param instance: The logical name of the Redis instance in the customer project.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        name = f"projects/{project_id}/locations/{location}/instances/{instance}"
        result = client.get_instance(
            request={"name": name},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        self.log.info("Fetched Instance: %s", name)
        return result
    @GoogleBaseHook.fallback_to_default_project_id
    def import_instance(
        self,
        location: str,
        instance: str,
        input_config: dict | InputConfig,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.

        Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation.
        When complete, the instance will contain only data from the imported file.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :param instance: The logical name of the Redis instance in the customer project.
        :param input_config: Required. Specify data to be imported.
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.redis_v1.types.InputConfig`
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        name = f"projects/{project_id}/locations/{location}/instances/{instance}"
        self.log.info("Importing Instance: %s", name)
        result = client.import_instance(
            request={"name": name, "input_config": input_config},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Long-running operation: block until the import completes.
        result.result()
        self.log.info("Instance imported: %s", name)
    @GoogleBaseHook.fallback_to_default_project_id
    def list_instances(
        self,
        location: str,
        page_size: int,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        List Redis instances owned by a project at the specified location (region) or all locations.

        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
            If it is specified as ``-`` (wildcard), then all regions available to the project are
            queried, and the results are aggregated.
        :param page_size: The maximum number of resources contained in the underlying API response. If page
            streaming is performed per- resource, this parameter does not affect the return value. If page
            streaming is performed per-page, this determines the maximum number of resources in a page.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :param metadata: Additional metadata that is provided to the method.
        """
        client = self.get_conn()
        parent = f"projects/{project_id}/locations/{location}"
        # The client paginates under the hood; page_size bounds each underlying request.
        result = client.list_instances(
            request={"parent": parent, "page_size": page_size},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        self.log.info("Fetched instances")
        return result
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
update_mask: dict | FieldMask,
instance: dict | Instance,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
instance_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Update the metadata and configuration of a specific Redis instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
field. The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
- ``labels``
- ``memorySizeGb``
- ``redisConfig``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(instance, dict):
instance = Instance(**instance)
elif not isinstance(instance, Instance):
raise AirflowException("instance is not instance of Instance type or python dict")
if location and instance_id:
name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"
instance.name = name
self.log.info("Updating instances: %s", instance.name)
result = client.update_instance(
request={"update_mask": update_mask, "instance": instance},
retry=retry,
timeout=timeout,
metadata=metadata,
)
updated_instance = result.result()
self.log.info("Instance updated: %s", instance.name)
return updated_instance
|
CloudMemorystoreHook
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/legacy_resources.py
|
{
"start": 1353,
"end": 9687
}
|
class ____(ConfigurableResource):
request_max_retries: int = Field(
default=3,
description=(
"The maximum number of times requests to the Airbyte API should be retried "
"before failing."
),
)
request_retry_delay: float = Field(
default=0.25,
description="Time (in seconds) to wait between each request retry.",
)
request_timeout: int = Field(
default=15,
description="Time (in seconds) after which the requests to Airbyte are declared timed out.",
)
cancel_sync_on_run_termination: bool = Field(
default=True,
description=(
"Whether to cancel a sync in Airbyte if the Dagster runner is terminated. This may"
" be useful to disable if using Airbyte sources that cannot be cancelled and"
" resumed easily, or if your Dagster deployment may experience runner interruptions"
" that do not impact your Airbyte deployment."
),
)
poll_interval: float = Field(
default=DEFAULT_POLL_INTERVAL_SECONDS,
description="Time (in seconds) to wait between checking a sync's status.",
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@property
@cached_method
def _log(self) -> logging.Logger:
return get_dagster_logger()
@property
@abstractmethod
def api_base_url(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def all_additional_request_params(self) -> Mapping[str, Any]:
raise NotImplementedError()
def make_request(
self,
endpoint: str,
data: Optional[Mapping[str, object]] = None,
method: str = "POST",
include_additional_request_params: bool = True,
) -> Optional[Mapping[str, object]]:
"""Creates and sends a request to the desired Airbyte REST API endpoint.
Args:
endpoint (str): The Airbyte API endpoint to send this request to.
data (Optional[str]): JSON-formatted data string to be included in the request.
Returns:
Optional[Dict[str, Any]]: Parsed json data from the response to this request
"""
url = self.api_base_url + endpoint
headers = {"accept": "application/json"}
num_retries = 0
while True:
try:
request_args: dict[str, Any] = dict(
method=method,
url=url,
headers=headers,
timeout=self.request_timeout,
)
if data:
request_args["json"] = data
if include_additional_request_params:
request_args = deep_merge_dicts(
request_args,
self.all_additional_request_params,
)
response = requests.request(
**request_args,
)
response.raise_for_status()
if response.status_code == 204:
return None
return response.json()
except RequestException as e:
self._log.error("Request to Airbyte API failed: %s", e)
if num_retries == self.request_max_retries:
break
num_retries += 1
time.sleep(self.request_retry_delay)
raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")
@abstractmethod
def start_sync(self, connection_id: str) -> Mapping[str, object]:
raise NotImplementedError()
@abstractmethod
def get_connection_details(self, connection_id: str) -> Mapping[str, object]:
raise NotImplementedError()
@abstractmethod
def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:
raise NotImplementedError()
@abstractmethod
def cancel_job(self, job_id: int):
raise NotImplementedError()
@property
@abstractmethod
def _should_forward_logs(self) -> bool:
raise NotImplementedError()
def sync_and_poll(
self,
connection_id: str,
poll_interval: Optional[float] = None,
poll_timeout: Optional[float] = None,
) -> AirbyteOutput:
"""Initializes a sync operation for the given connector, and polls until it completes.
Args:
connection_id (str): The Airbyte Connector ID. You can retrieve this value from the
"Connection" tab of a given connection in the Arbyte UI.
poll_interval (float): The time (in seconds) that will be waited between successive polls.
poll_timeout (float): The maximum time that will waited before this operation is timed
out. By default, this will never time out.
Returns:
:py:class:`~AirbyteOutput`:
Details of the sync job.
"""
connection_details = self.get_connection_details(connection_id)
job_details = self.start_sync(connection_id)
job_info = cast("dict[str, object]", job_details.get("job", {}))
job_id = cast("int", job_info.get("id"))
self._log.info(f"Job {job_id} initialized for connection_id={connection_id}.")
start = time.monotonic()
logged_attempts = 0
logged_lines = 0
state = None
try:
while True:
if poll_timeout and start + poll_timeout < time.monotonic():
raise Failure(
f"Timeout: Airbyte job {job_id} is not ready after the timeout"
f" {poll_timeout} seconds"
)
time.sleep(poll_interval or self.poll_interval)
job_details = self.get_job_status(connection_id, job_id)
attempts = cast("list", job_details.get("attempts", []))
cur_attempt = len(attempts)
# spit out the available Airbyte log info
if cur_attempt:
if self._should_forward_logs:
log_lines = attempts[logged_attempts].get("logs", {}).get("logLines", [])
for line in log_lines[logged_lines:]:
sys.stdout.write(line + "\n")
sys.stdout.flush()
logged_lines = len(log_lines)
# if there's a next attempt, this one will have no more log messages
if logged_attempts < cur_attempt - 1:
logged_lines = 0
logged_attempts += 1
job_info = cast("dict[str, object]", job_details.get("job", {}))
state = job_info.get("status")
if state in (
AirbyteJobStatusType.RUNNING,
AirbyteJobStatusType.PENDING,
AirbyteJobStatusType.INCOMPLETE,
):
continue
elif state == AirbyteJobStatusType.SUCCEEDED:
break
elif state == AirbyteJobStatusType.ERROR:
raise Failure(f"Job failed: {job_id}")
elif state == AirbyteJobStatusType.CANCELLED:
raise Failure(f"Job was cancelled: {job_id}")
else:
raise Failure(f"Encountered unexpected state `{state}` for job_id {job_id}")
finally:
# if Airbyte sync has not completed, make sure to cancel it so that it doesn't outlive
# the python process
if (
state
not in (
AirbyteJobStatusType.SUCCEEDED,
AirbyteJobStatusType.ERROR,
AirbyteJobStatusType.CANCELLED,
)
and self.cancel_sync_on_run_termination
):
self.cancel_job(job_id)
return AirbyteOutput(job_details=job_details, connection_details=connection_details)
@superseded(
additional_warn_text=(
"If you are using Airbyte 1.6.0 or higher, please see the migration guide: https://docs.dagster.io/integrations/libraries/airbyte/migration-guide"
)
)
|
BaseAirbyteResource
|
python
|
coleifer__peewee
|
tests/transactions.py
|
{
"start": 384,
"end": 875
}
|
class ____(ModelTestCase):
requires = [Register]
def assertRegister(self, vals):
query = Register.select().order_by(Register.value)
self.assertEqual([register.value for register in query], vals)
def _save(self, *vals):
Register.insert([{Register.value: val} for val in vals]).execute()
def requires_nested(fn):
return skip_if(IS_CRDB and not IS_CRDB_NESTED_TX,
'nested transaction support is required')(fn)
|
BaseTransactionTestCase
|
python
|
ray-project__ray
|
python/ray/train/xgboost/xgboost_trainer.py
|
{
"start": 2872,
"end": 13184
}
|
class ____(SimpleXGBoostTrainer):
"""A Trainer for distributed data-parallel XGBoost training.
Example
-------
.. testcode::
:skipif: True
import xgboost
import ray.data
import ray.train
from ray.train.xgboost import RayTrainReportCallback, XGBoostTrainer
def train_fn_per_worker(config: dict):
# (Optional) Add logic to resume training state from a checkpoint.
# ray.train.get_checkpoint()
# 1. Get the dataset shard for the worker and convert to a `xgboost.DMatrix`
train_ds_iter, eval_ds_iter = (
ray.train.get_dataset_shard("train"),
ray.train.get_dataset_shard("validation"),
)
train_ds, eval_ds = train_ds_iter.materialize(), eval_ds_iter.materialize()
train_df, eval_df = train_ds.to_pandas(), eval_ds.to_pandas()
train_X, train_y = train_df.drop("y", axis=1), train_df["y"]
eval_X, eval_y = eval_df.drop("y", axis=1), eval_df["y"]
dtrain = xgboost.DMatrix(train_X, label=train_y)
deval = xgboost.DMatrix(eval_X, label=eval_y)
params = {
"tree_method": "approx",
"objective": "reg:squarederror",
"eta": 1e-4,
"subsample": 0.5,
"max_depth": 2,
}
# 2. Do distributed data-parallel training.
# Ray Train sets up the necessary coordinator processes and
# environment variables for your workers to communicate with each other.
bst = xgboost.train(
params,
dtrain=dtrain,
evals=[(deval, "validation")],
num_boost_round=10,
callbacks=[RayTrainReportCallback()],
)
train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
eval_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(16)])
trainer = XGBoostTrainer(
train_fn_per_worker,
datasets={"train": train_ds, "validation": eval_ds},
scaling_config=ray.train.ScalingConfig(num_workers=4),
)
result = trainer.fit()
booster = RayTrainReportCallback.get_model(result.checkpoint)
Args:
train_loop_per_worker: The training function to execute on each worker.
This function can either take in zero arguments or a single ``Dict``
argument which is set by defining ``train_loop_config``.
Within this function you can use any of the
:ref:`Ray Train Loop utilities <train-loop-api>`.
train_loop_config: A configuration ``Dict`` to pass in as an argument to
``train_loop_per_worker``.
This is typically used for specifying hyperparameters.
xgboost_config: The configuration for setting up the distributed xgboost
backend. Defaults to using the "rabit" backend.
See :class:`~ray.train.xgboost.XGBoostConfig` for more info.
datasets: The Ray Datasets to use for training and validation.
dataset_config: The configuration for ingesting the input ``datasets``.
By default, all the Ray Datasets are split equally across workers.
See :class:`~ray.train.DataConfig` for more details.
scaling_config: The configuration for how to scale data parallel training.
``num_workers`` determines how many Python processes are used for training,
and ``use_gpu`` determines whether or not each process should use GPUs.
See :class:`~ray.train.ScalingConfig` for more info.
run_config: The configuration for the execution of the training run.
See :class:`~ray.train.RunConfig` for more info.
resume_from_checkpoint: A checkpoint to resume training from.
This checkpoint can be accessed from within ``train_loop_per_worker``
by calling ``ray.train.get_checkpoint()``.
metadata: Dict that should be made available via
`ray.train.get_context().get_metadata()` and in `checkpoint.get_metadata()`
for checkpoints saved from this Trainer. Must be JSON-serializable.
label_column: [Deprecated] Name of the label column. A column with this name
must be present in the training dataset.
params: [Deprecated] XGBoost training parameters.
Refer to `XGBoost documentation <https://xgboost.readthedocs.io/>`_
for a list of possible parameters.
num_boost_round: [Deprecated] Target number of boosting iterations (trees in the model).
Note that unlike in ``xgboost.train``, this is the target number
of trees, meaning that if you set ``num_boost_round=10`` and pass a model
that has already been trained for 5 iterations, it will be trained for 5
iterations more, instead of 10 more.
**train_kwargs: [Deprecated] Additional kwargs passed to ``xgboost.train()`` function.
"""
_handles_checkpoint_freq = True
_handles_checkpoint_at_end = True
def __init__(
self,
train_loop_per_worker: Optional[
Union[Callable[[], None], Callable[[Dict], None]]
] = None,
*,
train_loop_config: Optional[Dict] = None,
xgboost_config: Optional[XGBoostConfig] = None,
scaling_config: Optional[ray.train.ScalingConfig] = None,
run_config: Optional[ray.train.RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
dataset_config: Optional[ray.train.DataConfig] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
metadata: Optional[Dict[str, Any]] = None,
# TODO(justinvyu): [Deprecated] Legacy XGBoostTrainer API
label_column: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
num_boost_round: Optional[int] = None,
**train_kwargs,
):
if Version(xgboost.__version__) < Version("1.7.0"):
raise ImportError(
"`XGBoostTrainer` requires the `xgboost` version to be >= 1.7.0. "
'Upgrade with: `pip install -U "xgboost>=1.7"`'
)
# TODO(justinvyu): [Deprecated] Legacy XGBoostTrainer API
legacy_api = train_loop_per_worker is None
if legacy_api:
train_loop_per_worker = self._get_legacy_train_fn_per_worker(
xgboost_train_kwargs=train_kwargs,
run_config=run_config,
label_column=label_column,
num_boost_round=num_boost_round,
datasets=datasets,
)
train_loop_config = params or {}
elif train_kwargs:
_log_deprecation_warning(
"Passing `xgboost.train` kwargs to `XGBoostTrainer` is deprecated. "
"In your training function, you can call `xgboost.train(**kwargs)` "
"with arbitrary arguments. "
f"{LEGACY_XGBOOST_TRAINER_DEPRECATION_MESSAGE}"
)
super(XGBoostTrainer, self).__init__(
train_loop_per_worker=train_loop_per_worker,
train_loop_config=train_loop_config,
xgboost_config=xgboost_config,
scaling_config=scaling_config,
run_config=run_config,
datasets=datasets,
dataset_config=dataset_config,
resume_from_checkpoint=resume_from_checkpoint,
metadata=metadata,
)
def _get_legacy_train_fn_per_worker(
self,
xgboost_train_kwargs: Dict,
run_config: Optional[ray.train.RunConfig],
datasets: Optional[Dict[str, GenDataset]],
label_column: Optional[str],
num_boost_round: Optional[int],
) -> Callable[[Dict], None]:
"""Get the training function for the legacy XGBoostTrainer API."""
datasets = datasets or {}
if not datasets.get(TRAIN_DATASET_KEY):
raise ValueError(
"`datasets` must be provided for the XGBoostTrainer API "
"if `train_loop_per_worker` is not provided. "
"This dict must contain the training dataset under the "
f"key: '{TRAIN_DATASET_KEY}'. "
f"Got keys: {list(datasets.keys())}"
)
if not label_column:
raise ValueError(
"`label_column` must be provided for the XGBoostTrainer API "
"if `train_loop_per_worker` is not provided. "
"This is the column name of the label in the dataset."
)
num_boost_round = num_boost_round or 10
_log_deprecation_warning(LEGACY_XGBOOST_TRAINER_DEPRECATION_MESSAGE)
# Initialize a default Ray Train metrics/checkpoint reporting callback if needed
callbacks = xgboost_train_kwargs.get("callbacks", [])
user_supplied_callback = any(
isinstance(callback, RayTrainReportCallback) for callback in callbacks
)
callback_kwargs = {}
if run_config:
checkpoint_frequency = run_config.checkpoint_config.checkpoint_frequency
checkpoint_at_end = run_config.checkpoint_config.checkpoint_at_end
callback_kwargs["frequency"] = checkpoint_frequency
# Default `checkpoint_at_end=True` unless the user explicitly sets it.
callback_kwargs["checkpoint_at_end"] = (
checkpoint_at_end if checkpoint_at_end is not None else True
)
if not user_supplied_callback:
callbacks.append(RayTrainReportCallback(**callback_kwargs))
xgboost_train_kwargs["callbacks"] = callbacks
train_fn_per_worker = partial(
_xgboost_train_fn_per_worker,
label_column=label_column,
num_boost_round=num_boost_round,
dataset_keys=set(datasets),
xgboost_train_kwargs=xgboost_train_kwargs,
)
return train_fn_per_worker
@classmethod
def get_model(
cls,
checkpoint: Checkpoint,
) -> xgboost.Booster:
"""Retrieve the XGBoost model stored in this checkpoint."""
return RayTrainReportCallback.get_model(checkpoint)
|
XGBoostTrainer
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 319431,
"end": 320176
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of UnmarkIssueAsDuplicate"""
__schema__ = github_schema
__field_names__ = ("duplicate_id", "canonical_id", "client_mutation_id")
duplicate_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="duplicateId")
"""ID of the issue or pull request currently marked as a duplicate."""
canonical_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="canonicalId")
"""ID of the issue or pull request currently considered
canonical/authoritative/original.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
UnmarkIssueAsDuplicateInput
|
python
|
conda__conda
|
conda/core/package_cache_data.py
|
{
"start": 2551,
"end": 3116
}
|
class ____(type):
"""This metaclass does basic caching of PackageCache instance objects."""
def __call__(cls, pkgs_dir: str | os.PathLike | Path):
if isinstance(pkgs_dir, PackageCacheData):
return pkgs_dir
elif (pkgs_dir := str(pkgs_dir)) in PackageCacheData._cache_:
return PackageCacheData._cache_[pkgs_dir]
else:
package_cache_instance = super().__call__(pkgs_dir)
PackageCacheData._cache_[pkgs_dir] = package_cache_instance
return package_cache_instance
|
PackageCacheType
|
python
|
walkccc__LeetCode
|
solutions/2321. Maximum Score Of Spliced Array/2321.py
|
{
"start": 0,
"end": 526
}
|
class ____:
def maximumsSplicedArray(self, nums1: list[int], nums2: list[int]) -> int:
def kadane(nums1: list[int], nums2: list[int]) -> int:
"""
Returns the maximum gain of swapping some numbers in `nums1` with some
numbers in `nums2`.
"""
gain = 0
maxGain = 0
for num1, num2 in zip(nums1, nums2):
gain = max(0, gain + num2 - num1)
maxGain = max(maxGain, gain)
return maxGain + sum(nums1)
return max(kadane(nums1, nums2), kadane(nums2, nums1))
|
Solution
|
python
|
MorvanZhou__Reinforcement-learning-with-tensorflow
|
contents/10_A3C/A3C_discrete_action.py
|
{
"start": 802,
"end": 4413
}
|
class ____(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('a_loss'):
log_prob = tf.reduce_sum(tf.log(self.a_prob + 1e-5) * tf.one_hot(self.a_his, N_A, dtype=tf.float32), axis=1, keep_dims=True)
exp_v = log_prob * tf.stop_gradient(td)
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
axis=1, keep_dims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
a_prob = tf.layers.dense(l_a, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return a_prob, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
prob_weights = SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
action = np.random.choice(range(prob_weights.shape[1]),
p=prob_weights.ravel()) # select action w.r.t the actions prob
return action
|
ACNet
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/lib/definitions_component/__init__.py
|
{
"start": 514,
"end": 891
}
|
class ____(Component, Model, Resolvable):
"""An arbitrary set of Dagster definitions."""
path: Optional[str]
def build_defs(self, context: ComponentLoadContext) -> Definitions:
component = PythonFileComponent(
Path(self.path) if self.path else context.path, components={}
)
return component.build_defs(context)
|
DefinitionsComponent
|
python
|
getsentry__sentry
|
tests/snuba/tsdb/test_tsdb_backend.py
|
{
"start": 31968,
"end": 33965
}
|
class ____(TestCase):
def setUp(self) -> None:
self.db = SnubaTSDB()
def run_test(self, end, interval, jitter, expected_start, expected_end):
start = end - interval
rollup, rollup_series = self.db.get_optimal_rollup_series(start, end)
series = self.db._add_jitter_to_series(rollup_series, start, rollup, jitter)
assert to_datetime(series[0]) == expected_start
assert to_datetime(series[-1]) == expected_end
def test(self) -> None:
self.run_test(
end=datetime(2022, 5, 18, 10, 23, 4, tzinfo=UTC),
interval=timedelta(hours=1),
jitter=5,
expected_start=datetime(2022, 5, 18, 9, 22, 55, tzinfo=UTC),
expected_end=datetime(2022, 5, 18, 10, 22, 55, tzinfo=UTC),
)
self.run_test(
end=datetime(2022, 5, 18, 10, 23, 8, tzinfo=UTC),
interval=timedelta(hours=1),
jitter=5,
expected_start=datetime(2022, 5, 18, 9, 23, 5, tzinfo=UTC),
expected_end=datetime(2022, 5, 18, 10, 23, 5, tzinfo=UTC),
)
# Jitter should be the same
self.run_test(
end=datetime(2022, 5, 18, 10, 23, 8, tzinfo=UTC),
interval=timedelta(hours=1),
jitter=55,
expected_start=datetime(2022, 5, 18, 9, 23, 5, tzinfo=UTC),
expected_end=datetime(2022, 5, 18, 10, 23, 5, tzinfo=UTC),
)
self.run_test(
end=datetime(2022, 5, 18, 22, 33, 2, tzinfo=UTC),
interval=timedelta(minutes=1),
jitter=3,
expected_start=datetime(2022, 5, 18, 22, 31, 53, tzinfo=UTC),
expected_end=datetime(2022, 5, 18, 22, 32, 53, tzinfo=UTC),
)
def test_empty_series(self) -> None:
assert self.db._add_jitter_to_series([], datetime(2022, 5, 18, 10, 23, 4), 60, 127) == []
assert self.db._add_jitter_to_series([], datetime(2022, 5, 18, 10, 23, 4), 60, None) == []
|
AddJitterToSeriesTest
|
python
|
getsentry__sentry
|
src/sentry/migrations/0985_add_timestamp_to_grouphash_table.py
|
{
"start": 184,
"end": 1899
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0984_authprovider_json_field"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[
migrations.AddField(
model_name="grouphash",
name="date_added",
field=models.DateTimeField(null=True),
),
],
state_operations=[
migrations.AddField(
model_name="grouphash",
name="date_added",
field=models.DateTimeField(default=django.utils.timezone.now, null=True),
),
],
),
]
|
Migration
|
python
|
keras-team__keras
|
keras/src/constraints/constraints.py
|
{
"start": 5292,
"end": 7422
}
|
class ____(Constraint):
"""MinMaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have the norm between a lower bound and an upper bound.
Args:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
rescaled to yield
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
Effectively, this means that rate=1.0 stands for strict
enforcement of the constraint, while rate<1.0 means that
weights will be rescaled at each step to slowly move
towards a value inside the desired interval.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
self.min_value = min_value
self.max_value = max_value
self.rate = rate
self.axis = axis
def __call__(self, w):
w = backend.convert_to_tensor(w)
norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
desired = (
self.rate * ops.clip(norms, self.min_value, self.max_value)
+ (1 - self.rate) * norms
)
return ops.cast(w, norms.dtype) * (
desired / (backend.epsilon() + norms)
)
def get_config(self):
return {
"min_value": self.min_value,
"max_value": self.max_value,
"rate": self.rate,
"axis": self.axis,
}
|
MinMaxNorm
|
python
|
walkccc__LeetCode
|
solutions/2492. Minimum Score of a Path Between Two Cities/2492.py
|
{
"start": 0,
"end": 507
}
|
class ____:
def minScore(self, n: int, roads: list[list[int]]) -> int:
ans = math.inf
graph = [[] for _ in range(n + 1)] # graph[u] := [(v, distance)]
q = collections.deque([1])
seen = {1}
for u, v, distance in roads:
graph[u].append((v, distance))
graph[v].append((u, distance))
while q:
u = q.popleft()
for v, d in graph[u]:
ans = min(ans, d)
if v in seen:
continue
q.append(v)
seen.add(v)
return ans
|
Solution
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
|
{
"start": 4376,
"end": 6796
}
|
class ____(AwsBaseSensor[EmrServerlessHook]):
"""
Poll the state of the job run until it reaches a terminal state; fails if the job run fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrServerlessJobSensor`
:param application_id: application_id to check the state of
:param job_run_id: job_run_id to check the state of
:param target_states: a set of states to wait for, defaults to 'SUCCESS'
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.h
"""
aws_hook_class = EmrServerlessHook
template_fields: Sequence[str] = aws_template_fields(
"application_id",
"job_run_id",
)
def __init__(
self,
*,
application_id: str,
job_run_id: str,
target_states: set | frozenset = frozenset(EmrServerlessHook.JOB_SUCCESS_STATES),
**kwargs: Any,
) -> None:
self.target_states = target_states
self.application_id = application_id
self.job_run_id = job_run_id
super().__init__(**kwargs)
def poke(self, context: Context) -> bool:
response = self.hook.conn.get_job_run(applicationId=self.application_id, jobRunId=self.job_run_id)
state = response["jobRun"]["state"]
if state in EmrServerlessHook.JOB_FAILURE_STATES:
raise AirflowException(
f"EMR Serverless job failed: {self.failure_message_from_response(response)}"
)
return state in self.target_states
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
return response["jobRun"]["stateDetails"]
|
EmrServerlessJobSensor
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_real_transforms.py
|
{
"start": 9377,
"end": 9520
}
|
class ____(_TestDCTIIIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
|
TestDCTIIIFloat
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_associationproxy.py
|
{
"start": 18882,
"end": 28846
}
|
class ____(_CollectionOperations):
collection_class = set
def test_set_operations(self):
Parent, Child = self.classes.Parent, self.classes.Child
self.session = fixture_session()
p1 = Parent("P1")
self.assert_(not p1._children)
self.assert_(not p1.children)
ch1 = Child("regular")
p1._children.add(ch1)
self.assert_(ch1 in p1._children)
self.assert_(len(p1._children) == 1)
self.assert_(p1.children)
self.assert_(len(p1.children) == 1)
self.assert_(ch1 not in p1.children)
self.assert_("regular" in p1.children)
p1.children.add("proxied")
self.assert_("proxied" in p1.children)
self.assert_("proxied" not in p1._children)
self.assert_(len(p1.children) == 2)
self.assert_(len(p1._children) == 2)
self.assert_({o.name for o in p1._children} == {"regular", "proxied"})
ch2 = None
for o in p1._children:
if o.name == "proxied":
ch2 = o
break
p1._children.remove(ch2)
self.assert_(len(p1._children) == 1)
self.assert_(len(p1.children) == 1)
self.assert_(p1._children == {ch1})
p1.children.remove("regular")
self.assert_(len(p1._children) == 0)
self.assert_(len(p1.children) == 0)
p1.children = ["a", "b", "c"]
self.assert_(len(p1._children) == 3)
self.assert_(len(p1.children) == 3)
del ch1
p1 = self.roundtrip(p1)
self.assert_(len(p1._children) == 3)
self.assert_(len(p1.children) == 3)
self.assert_("a" in p1.children)
self.assert_("b" in p1.children)
self.assert_("d" not in p1.children)
self.assert_(p1.children == {"a", "b", "c"})
assert_raises(KeyError, p1.children.remove, "d")
self.assert_(len(p1.children) == 3)
p1.children.discard("d")
self.assert_(len(p1.children) == 3)
p1 = self.roundtrip(p1)
self.assert_(len(p1.children) == 3)
popped = p1.children.pop()
self.assert_(len(p1.children) == 2)
self.assert_(popped not in p1.children)
p1 = self.roundtrip(p1)
self.assert_(len(p1.children) == 2)
self.assert_(popped not in p1.children)
p1.children = ["a", "b", "c"]
p1 = self.roundtrip(p1)
self.assert_(p1.children == {"a", "b", "c"})
p1.children.discard("b")
p1 = self.roundtrip(p1)
self.assert_(p1.children == {"a", "c"})
p1.children.remove("a")
p1 = self.roundtrip(p1)
self.assert_(p1.children == {"c"})
p1._children = set()
self.assert_(len(p1.children) == 0)
try:
p1._children = []
self.assert_(False)
except TypeError:
self.assert_(True)
try:
p1._children = None
self.assert_(False)
except TypeError:
self.assert_(True)
assert_raises(TypeError, set, [p1.children])
def test_special_binops_checks(self):
"""test for #11349"""
Parent = self.classes.Parent
p1 = Parent("P1")
p1.children = ["a", "b", "c"]
control = {"a", "b", "c"}
with expect_raises(TypeError):
control | ["c", "d"]
with expect_raises(TypeError):
p1.children | ["c", "d"]
with expect_raises(TypeError):
control |= ["c", "d"]
with expect_raises(TypeError):
p1.children |= ["c", "d"]
with expect_raises(TypeError):
control & ["c", "d"]
with expect_raises(TypeError):
p1.children & ["c", "d"]
with expect_raises(TypeError):
control &= ["c", "d"]
with expect_raises(TypeError):
p1.children &= ["c", "d"]
with expect_raises(TypeError):
control ^ ["c", "d"]
with expect_raises(TypeError):
p1.children ^ ["c", "d"]
with expect_raises(TypeError):
control ^= ["c", "d"]
with expect_raises(TypeError):
p1.children ^= ["c", "d"]
with expect_raises(TypeError):
control - ["c", "d"]
with expect_raises(TypeError):
p1.children - ["c", "d"]
with expect_raises(TypeError):
control -= ["c", "d"]
with expect_raises(TypeError):
p1.children -= ["c", "d"]
def test_set_comparisons(self):
Parent = self.classes.Parent
p1 = Parent("P1")
p1.children = ["a", "b", "c"]
control = {"a", "b", "c"}
for other in (
{"a", "b", "c"},
{"a", "b", "c", "d"},
{"a"},
{"a", "b"},
{"c", "d"},
{"e", "f", "g"},
set(),
):
eq_(p1.children.union(other), control.union(other))
eq_(p1.children.difference(other), control.difference(other))
eq_((p1.children - other), (control - other))
eq_(p1.children.intersection(other), control.intersection(other))
eq_(
p1.children.symmetric_difference(other),
control.symmetric_difference(other),
)
eq_(p1.children.issubset(other), control.issubset(other))
eq_(p1.children.issuperset(other), control.issuperset(other))
self.assert_((p1.children == other) == (control == other))
self.assert_((p1.children != other) == (control != other))
self.assert_((p1.children < other) == (control < other))
self.assert_((p1.children <= other) == (control <= other))
self.assert_((p1.children > other) == (control > other))
self.assert_((p1.children >= other) == (control >= other))
def test_set_comparison_empty_to_empty(self):
# test issue #3265 which was fixed in Python version 2.7.8
Parent = self.classes.Parent
p1 = Parent("P1")
p1.children = []
p2 = Parent("P2")
p2.children = []
set_0 = set()
set_a = p1.children
set_b = p2.children
is_(set_a == set_a, True)
is_(set_a == set_b, True)
is_(set_a == set_0, True)
is_(set_0 == set_a, True)
is_(set_a != set_a, False)
is_(set_a != set_b, False)
is_(set_a != set_0, False)
is_(set_0 != set_a, False)
def test_set_mutation(self):
Parent = self.classes.Parent
self.session = fixture_session()
# mutations
for op in (
"update",
"intersection_update",
"difference_update",
"symmetric_difference_update",
):
for base in (["a", "b", "c"], []):
for other in (
{"a", "b", "c"},
{"a", "b", "c", "d"},
{"a"},
{"a", "b"},
{"c", "d"},
{"e", "f", "g"},
set(),
):
p = Parent("p")
p.children = base[:]
control = set(base[:])
getattr(p.children, op)(other)
getattr(control, op)(other)
try:
self.assert_(p.children == control)
except Exception:
print("Test %s.%s(%s):" % (set(base), op, other))
print("want", repr(control))
print("got", repr(p.children))
raise
p = self.roundtrip(p)
try:
self.assert_(p.children == control)
except Exception:
print("Test %s.%s(%s):" % (base, op, other))
print("want", repr(control))
print("got", repr(p.children))
raise
# in-place mutations
for op in ("|=", "-=", "&=", "^="):
for base in (["a", "b", "c"], []):
for other in (
{"a", "b", "c"},
{"a", "b", "c", "d"},
{"a"},
{"a", "b"},
{"c", "d"},
{"e", "f", "g"},
frozenset(["e", "f", "g"]),
set(),
):
p = Parent("p")
p.children = base[:]
control = set(base[:])
exec("p.children %s other" % op)
exec("control %s other" % op)
try:
self.assert_(p.children == control)
except Exception:
print("Test %s %s %s:" % (set(base), op, other))
print("want", repr(control))
print("got", repr(p.children))
raise
p = self.roundtrip(p)
try:
self.assert_(p.children == control)
except Exception:
print("Test %s %s %s:" % (base, op, other))
print("want", repr(control))
print("got", repr(p.children))
raise
def test_bulk_replace(self):
Parent = self.classes.Parent
p1 = Parent("foo")
p1.children = {"a", "b", "c"}
assocs = set(p1._children)
keep_assocs = {a for a in assocs if a.name in ("a", "c")}
eq_(len(keep_assocs), 2)
remove_assocs = {a for a in assocs if a.name == "b"}
p1.children = {"a", "c", "d"}
eq_({a for a in p1._children if a.name in ("a", "c")}, keep_assocs)
assert not remove_assocs.intersection(p1._children)
eq_(p1.children, {"a", "c", "d"})
|
SetTest
|
python
|
huggingface__transformers
|
tests/models/sam/test_modeling_sam.py
|
{
"start": 1370,
"end": 5448
}
|
class ____:
def __init__(
self,
parent,
hidden_size=36,
intermediate_size=72,
projection_dim=62,
output_channels=32,
num_hidden_layers=2,
num_attention_heads=4,
num_channels=3,
image_size=24,
patch_size=2,
hidden_act="gelu",
layer_norm_eps=1e-06,
dropout=0.0,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
qkv_bias=True,
mlp_ratio=4.0,
use_abs_pos=True,
use_rel_pos=True,
rel_pos_zero_init=False,
window_size=14,
global_attn_indexes=[2, 5, 8, 11],
num_pos_feats=16,
mlp_dim=None,
batch_size=2,
):
self.parent = parent
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.output_channels = output_channels
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.qkv_bias = qkv_bias
self.mlp_ratio = mlp_ratio
self.use_abs_pos = use_abs_pos
self.use_rel_pos = use_rel_pos
self.rel_pos_zero_init = rel_pos_zero_init
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.num_pos_feats = num_pos_feats
self.mlp_dim = mlp_dim
self.batch_size = batch_size
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def get_config(self):
return SamVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
initializer_factor=self.initializer_factor,
output_channels=self.output_channels,
qkv_bias=self.qkv_bias,
mlp_ratio=self.mlp_ratio,
use_abs_pos=self.use_abs_pos,
use_rel_pos=self.use_rel_pos,
rel_pos_zero_init=self.rel_pos_zero_init,
window_size=self.window_size,
global_attn_indexes=self.global_attn_indexes,
num_pos_feats=self.num_pos_feats,
mlp_dim=self.mlp_dim,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def create_and_check_model(self, config, pixel_values):
model = SamVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
output_size = self.image_size // self.patch_size
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_channels, output_size, output_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
|
SamVisionModelTester
|
python
|
ipython__ipython
|
IPython/core/display.py
|
{
"start": 14928,
"end": 15032
}
|
class ____(TextDisplayObject):
def _repr_latex_(self):
return self._data_and_metadata()
|
Latex
|
python
|
astropy__astropy
|
astropy/io/ascii/fixedwidth.py
|
{
"start": 15339,
"end": 15483
}
|
class ____(FixedWidthSplitter):
"""Splitter for fixed width tables splitting on ``' '``."""
delimiter = " "
|
FixedWidthTwoLineDataSplitter
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
|
{
"start": 30237,
"end": 34829
}
|
class ____(googletest.Benchmark):
# Configurations to test. See `run_benchmark` for config param docs.
CONFIGS = [
{'shape': [10, 10]},
{'shape': [10, 1000]},
{'shape': [1000, 10]},
{'shape': [1000, 10], 'fill': [1, 0.95]}, # Mostly full.
{'shape': [1000, 10], 'fill': [1, 0.05]}, # Mostly empty.
{'shape': [1000, 10], 'dtype': dtypes.string},
{'shape': [1000, 10], 'dtype': dtypes.int64},
{'shape': [100, 100]},
{'shape': [50, 50, 32]},
{'shape': [100, 100, 100], 'min_iters': 100},
{'shape': [1000, 1000], 'min_iters': 100},
{'shape': [10, 10, 10, 10, 10]},
{'shape': [10, 10, 10, 10, 10], 'ragged_rank': 1},
{'shape': [10, 10, 10, 10, 10], 'ragged_rank': 2},
{'shape': [50, 50, 32], 'ragged_rank': 1, 'default_shape': [32]},
{'shape': [200, 50, 32], 'ragged_rank': 1, 'default_shape': [32]}
] # pyformat: disable
def run_benchmark(self,
shape=(100, 100),
ragged_rank=None,
dtype=dtypes.float32,
fill=None,
default_shape=(),
output_shape=None,
min_iters=1000):
"""Run a benchmark with the specified configuration parameters.
Args:
shape: Bounding box for the input ragged tensor.
ragged_rank: Ragged rank for the input ragged tensor. Defaults to
`len(shape)-1`.
dtype: Data type for the input ragged tensor.
fill: How full each dimension should be (0-1). Corresponds 1:1 with
`shape`. Defaults to 0.8 for each dimension.
default_shape: Shape for the default (padding) value.
output_shape: Output shape -- ragged tensor will be padded or cropped to
this shape.
min_iters: Minimum iterations for benchmark.
"""
if ragged_rank is None:
ragged_rank = len(shape) - 1
if fill is None:
fill = [0.8 for _ in shape]
# Build the inputs for the op.
rt_input = self._generateRaggedTensor(shape, ragged_rank, dtype, fill)
default_value = constant_op.constant(
self._generateRaggedTensor(default_shape, 0, dtype), dtype=dtype)
mbs = np.prod(shape) / (2**20)
with session.Session(config=benchmark.benchmark_config()) as sess:
extras = {
'shape': shape,
'ragged_rank': ragged_rank,
'dtype': dtype,
'fill': fill,
'default_shape': default_shape
}
rt = ragged_factory_ops.constant(rt_input, dtype, ragged_rank=ragged_rank)
# Inputs for with_splits:
splits_rt_placeholder = ragged_factory_ops.placeholder(
dtype, ragged_rank, shape[ragged_rank + 1:])
splits_feed_dict = {splits_rt_placeholder: sess.run(rt)}
# Inputs for with_rowids:
rowids_feed_dict = {}
rowids_rt_placeholder = rebuild_ragged_tensor_with_value_rowids(
rt, rowids_feed_dict, sess)
# Common arguments for benchmarks:
run_op_benchmark_kwargs = dict(
sess=sess,
store_memory_usage=True,
min_iters=min_iters,
burn_iters=max(5, min_iters // 10),
mbs=mbs,
extras=extras)
ragged_to_tensor_with_splits = splits_rt_placeholder.to_tensor(
default_value=default_value)
self.run_op_benchmark(
op_or_tensor=ragged_to_tensor_with_splits.op,
name='ragged_to_tensor_with_splits',
feed_dict=splits_feed_dict,
**run_op_benchmark_kwargs)
ragged_to_tensor_with_rowids = rowids_rt_placeholder.to_tensor(
default_value=default_value)
self.run_op_benchmark(
op_or_tensor=ragged_to_tensor_with_rowids.op,
name='ragged_to_tensor_with_rowids',
feed_dict=rowids_feed_dict,
**run_op_benchmark_kwargs)
def _generateRaggedTensor(self, shape, ragged_rank, dtype, fill=None, axis=0):
if axis == len(shape):
value = random.random()
if dtype == dtypes.string:
value = str(value)
if dtype.is_integer:
value = int(value * 1000)
return value
if axis == 0 or axis > ragged_rank:
slice_size = shape[axis]
else:
slice_size = (np.random.geometric(fill[axis], shape[axis]) == 1).sum()
return [
self._generateRaggedTensor(shape, ragged_rank, dtype, fill, axis + 1)
for _ in range(slice_size)
]
def benchmark_ragged_to_dense(self):
random.seed(5)
for config in self.CONFIGS:
self.run_benchmark(**config)
if __name__ == '__main__':
googletest.main()
|
RaggedToDenseBenchmark
|
python
|
neetcode-gh__leetcode
|
python/0901-online-stock-span.py
|
{
"start": 0,
"end": 333
}
|
class ____:
def __init__(self):
self.stack = [] # pair: (price, span)
def next(self, price: int) -> int:
span = 1
while self.stack and self.stack[-1][0] <= price:
span += self.stack[-1][1]
self.stack.pop()
self.stack.append((price, span))
return span
|
StockSpanner
|
python
|
getsentry__sentry
|
tests/acceptance/test_link_team.py
|
{
"start": 464,
"end": 4196
}
|
class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=self.user)
self.team = self.create_team(organization=self.org, name="Team One")
self.create_member(
user=None,
email="bar@example.com",
organization=self.org,
role="owner",
teams=[self.team],
)
self.create_team_membership(user=self.user, team=self.team)
self.team_admin_user = self.create_user()
self.create_member(
user=self.team_admin_user,
team_roles=[(self.team, "admin")],
organization=self.org,
role="member",
)
self.integration = self.create_provider_integration(
provider="slack",
name="Team A",
external_id="TXXXXXXX1",
metadata={
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"installation_type": "born_as_bot",
},
)
self.integration.add_organization(self.org, self.user)
self.idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
self.identity = Identity.objects.create(
external_id="UXXXXXXX1",
idp=self.idp,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
linking_url = urlparse(
build_team_linking_url(
self.integration,
"UXXXXXXX1",
"CXXXXXXX9",
"general",
"http://example.slack.com/response_url",
)
)
self.path = linking_url.path
def test_link_team(self) -> None:
self.login_as(self.user)
self.browser.get(self.path)
self.browser.wait_until_not(".loading")
self.browser.click('[name="team"]')
self.browser.click(f'[value="{self.team.id}"]')
self.browser.click('[type="submit"]')
# Ensure we get to the next page before checking for the ExternalActor
self.browser.wait_until_test_id("back-to-slack")
assert ExternalActor.objects.filter(
team_id=self.team.id,
organization=self.org,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="general",
external_id="CXXXXXXX9",
).exists()
def test_link_team_as_team_admin(self) -> None:
self.create_team(organization=self.org, name="Team Two")
self.create_team(organization=self.org, name="Team Three")
self.login_as(self.team_admin_user)
self.browser.get(self.path)
self.browser.wait_until_not(".loading")
self.browser.click('[name="team"]')
select_element = self.browser.find_element(by=By.ID, value="id_team")
option_elements = select_element.find_elements(by=By.TAG_NAME, value="option")
# Ensure only the team the user is team admin is on is shown
assert len(option_elements) == 1
self.browser.click(f'[value="{self.team.id}"]')
self.browser.click('[type="submit"]')
# Ensure we get to the next page before checking for the ExternalActor
self.browser.wait_until_test_id("back-to-slack")
assert ExternalActor.objects.filter(
team_id=self.team.id,
organization=self.org,
integration_id=self.integration.id,
provider=ExternalProviders.SLACK.value,
external_name="general",
external_id="CXXXXXXX9",
).exists()
|
SlackLinkTeamTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-commcare/source_commcare/source.py
|
{
"start": 10408,
"end": 12951
}
|
class ____(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, any]:
if "api_key" not in config:
return False, None
return True, None
def base_schema(self):
return {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {"id": {"type": "string"}, "indexed_on": {"type": "string", "format": "date-time"}},
}
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
auth = TokenAuthenticator(config["api_key"], auth_method="ApiKey")
args = {
"authenticator": auth,
}
appdata = Application(**{**args, "app_id": config["app_id"], "project_space": config["project_space"]}).read_records(
sync_mode=SyncMode.full_refresh
)
# Generate streams for forms, one per xmlns and one stream for cases.
streams = self.generate_streams(args, config, appdata)
return streams
def generate_streams(self, args, config, appdata):
form_args = {"app_id": config["app_id"], "start_date": config["start_date"], "project_space": config["project_space"], **args}
streams = []
name2xmlns = {}
# Collect the form names and xmlns from the application
for record in appdata:
mods = record["modules"]
for m in mods:
forms = m["forms"]
for f in forms:
xmlns = f["xmlns"]
formname = ""
if "en" in f["name"]:
formname = f["name"]["en"].strip()
else:
# Unknown forms are named UNNAMED_xxxxx where xxxxx are the last 5 difits of the XMLNS
# This convention gives us repeatable names
formname = f"Unnamed_{xmlns[-5:]}"
name = formname
name2xmlns[name] = xmlns
# Create the streams from the collected names
# Sorted by name
for k in sorted(name2xmlns):
key = name2xmlns[k]
stream = Form(name=k, xmlns=key, schema=self.base_schema(), **form_args)
streams.append(stream)
stream = Case(
app_id=config["app_id"],
start_date=config["start_date"],
schema=self.base_schema(),
project_space=config["project_space"],
**args,
)
streams.append(stream)
return streams
|
SourceCommcare
|
python
|
facebook__pyre-check
|
client/dataclasses_json_extensions.py
|
{
"start": 989,
"end": 1271
}
|
class ____(DataclassJsonMixinWithCachedSchema):
dataclass_json_config: Mapping[str, object] = dataclasses_json.config(
letter_case=dataclasses_json.LetterCase.SNAKE,
undefined=dataclasses_json.Undefined.EXCLUDE,
)["dataclasses_json"]
|
SnakeCaseAndExcludeJsonMixin
|
python
|
PrefectHQ__prefect
|
src/prefect/exceptions.py
|
{
"start": 10164,
"end": 10300
}
|
class ____(PrefectException):
"""
Raised when attempting to call Task.map with all static arguments
"""
|
MappingMissingIterable
|
python
|
astropy__astropy
|
astropy/time/formats.py
|
{
"start": 79580,
"end": 80169
}
|
class ____(TimeDeltaFormat, TimeNumeric):
_check_finite = False
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1.0 / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1.0 / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
|
TimeDeltaNumeric
|
python
|
conda__conda
|
conda/plugins/types.py
|
{
"start": 2779,
"end": 6090
}
|
class ____(CondaPlugin):
"""
Return type to use when defining a conda virtual package plugin hook.
For details on how this is used, see
:meth:`~conda.plugins.hookspec.CondaSpecs.conda_virtual_packages`.
.. note::
The ``version`` and ``build`` parameters can be provided in two ways:
1. Direct values: a string or ``None`` (where ``None`` translates to ``0``)
2. Deferred callables: functions that return either a string, ``None`` (translates to ``0``),
or ``NULL`` (indicates the virtual package should not be exported)
:param name: Virtual package name (e.g., ``my_custom_os``).
:param version: Virtual package version (e.g., ``1.2.3``).
:param build: Virtual package build string (e.g., ``x86_64``).
:param override_entity: Can be set to either to "version" or "build", the corresponding
value will be overridden if the environment variable
``CONDA_OVERRIDE_<name>`` is set.
:param empty_override: Value to use for version or build if the override
environment variable is set to an empty string. By default,
this is ``NULL``.
:param version_validation: Optional version validation function to ensure that the override version follows a certain pattern.
"""
name: str
version: str | None | Callable[[], str | None | _Null]
build: str | None | Callable[[], str | None | _Null]
override_entity: Literal["version", "build"] | None = None
empty_override: None | _Null = NULL
version_validation: Callable[[str], str | None] | None = None
def to_virtual_package(self) -> PackageRecord | _Null:
# Take the raw version and build as they are.
# At this point, they may be callables (evaluated later) or direct values.
from conda.base.context import context
version = self.version
build = self.build
# Check for environment overrides.
# Overrides always yield a concrete value (string, NULL, or None),
# so after this step, version/build will no longer be callables if they were overridden.
if self.override_entity:
# environment variable has highest precedence
override_value = os.getenv(f"{APP_NAME}_OVERRIDE_{self.name}".upper())
# fallback to context
if override_value is None and context.override_virtual_packages:
override_value = context.override_virtual_packages.get(f"{self.name}")
if override_value is not None:
override_value = override_value.strip() or self.empty_override
if self.override_entity == "version":
version = override_value
elif self.override_entity == "build":
build = override_value
# If version/build were not overridden and are callables, evaluate them now.
version = maybecall(version)
build = maybecall(build)
if version is NULL or build is NULL:
return NULL
if self.version_validation and version is not None:
version = self.version_validation(version)
return PackageRecord.virtual_package(f"__{self.name}", version, build)
@dataclass
|
CondaVirtualPackage
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 018. 有效的回文/Solution.py
|
{
"start": 0,
"end": 378
}
|
class ____:
def isPalindrome(self, s: str) -> bool:
i, j = 0, len(s) - 1
while i < j:
while i < j and not s[i].isalnum():
i += 1
while i < j and not s[j].isalnum():
j -= 1
if s[i].lower() != s[j].lower():
return False
i, j = i + 1, j - 1
return True
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/3343. Count Number of Balanced Permutations/3343.py
|
{
"start": 0,
"end": 1143
}
|
class ____:
def countBalancedPermutations(self, num: str) -> int:
nums = list(map(int, num))
summ = sum(nums)
if summ % 2 == 1:
return 0
nums.sort(reverse=True)
@functools.lru_cache(None)
def dp(even: int, odd: int, evenBalance: int) -> int:
"""
Returns the number of permutations where there are `even` even indices
left, `odd` odd indices left, and `evenBalance` is the target sum of the
remaining numbers to be placed in even indices.
"""
if evenBalance < 0:
return 0
if even == 0:
return (evenBalance == 0) * math.factorial(odd)
if odd == 0:
return (sum(nums[-(even + odd):]) == evenBalance) * math.factorial(even)
return (dp(even - 1, odd, evenBalance - nums[-(odd + even)]) * even +
dp(even, odd - 1, evenBalance) * odd)
MOD = 1_000_000_007
perm = functools.reduce(lambda x, y: x * math.factorial(y),
collections.Counter(nums).values(), 1)
return (dp(even=(len(nums) + 1) // 2,
odd=len(nums) // 2,
evenBalance=summ // 2) // perm) % MOD
|
Solution
|
python
|
ray-project__ray
|
rllib/policy/tf_mixins.py
|
{
"start": 7339,
"end": 9804
}
|
class ____:
"""Assign the `update_target` method to the policy.
The function is called every `target_network_update_freq` steps by the
master learner.
"""
def __init__(self):
model_vars = self.model.trainable_variables()
target_model_vars = self.target_model.trainable_variables()
@make_tf_callable(self.get_session())
def update_target_fn(tau):
tau = tf.convert_to_tensor(tau, dtype=tf.float32)
update_target_expr = []
assert len(model_vars) == len(target_model_vars), (
model_vars,
target_model_vars,
)
for var, var_target in zip(model_vars, target_model_vars):
update_target_expr.append(
var_target.assign(tau * var + (1.0 - tau) * var_target)
)
logger.debug("Update target op {}".format(var_target))
return tf.group(*update_target_expr)
# Hard initial update.
self._do_update = update_target_fn
# TODO: The previous SAC implementation does an update(1.0) here.
# If this is changed to tau != 1.0 the sac_loss_function test fails. Why?
# Also the test is not very maintainable, we need to change that unittest
# anyway.
self.update_target(tau=1.0) # self.config.get("tau", 1.0))
@property
def q_func_vars(self):
if not hasattr(self, "_q_func_vars"):
self._q_func_vars = self.model.variables()
return self._q_func_vars
@property
def target_q_func_vars(self):
if not hasattr(self, "_target_q_func_vars"):
self._target_q_func_vars = self.target_model.variables()
return self._target_q_func_vars
# Support both hard and soft sync.
def update_target(self, tau: int = None) -> None:
self._do_update(np.float32(tau or self.config.get("tau", 1.0)))
def variables(self) -> List[TensorType]:
return self.model.variables()
def set_weights(self, weights):
if isinstance(self, TFPolicy):
TFPolicy.set_weights(self, weights)
elif isinstance(self, EagerTFPolicyV2): # Handle TF2V2 policies.
EagerTFPolicyV2.set_weights(self, weights)
elif isinstance(self, EagerTFPolicy): # Handle TF2 policies.
EagerTFPolicy.set_weights(self, weights)
self.update_target(self.config.get("tau", 1.0))
@OldAPIStack
|
TargetNetworkMixin
|
python
|
pypa__warehouse
|
tests/common/db/packaging.py
|
{
"start": 2332,
"end": 3638
}
|
class ____(WarehouseFactory):
class Meta:
model = File
release = factory.SubFactory(ReleaseFactory)
python_version = "source"
# TODO: Replace when factory_boy supports `unique`.
# See https://github.com/FactoryBoy/factory_boy/pull/997
filename = factory.Sequence(lambda _: fake.unique.file_name())
md5_digest = factory.LazyAttribute(
lambda o: hashlib.md5(o.filename.encode("utf8")).hexdigest()
)
sha256_digest = factory.LazyAttribute(
lambda o: hashlib.sha256(o.filename.encode("utf8")).hexdigest()
)
blake2_256_digest = factory.LazyAttribute(
lambda o: hashlib.blake2b(o.filename.encode("utf8"), digest_size=32).hexdigest()
)
upload_time = factory.Faker(
"date_time_between_dates", datetime_start=datetime.datetime(2008, 1, 1)
)
path = factory.LazyAttribute(
lambda o: "/".join(
[
o.blake2_256_digest[:2],
o.blake2_256_digest[2:4],
o.blake2_256_digest[4:],
o.filename,
]
)
)
size = factory.Faker("pyint")
packagetype = factory.LazyAttribute(
lambda _: random.choice(
[
"bdist_wheel",
"sdist",
]
)
)
|
FileFactory
|
python
|
numba__numba
|
numba/cuda/cudamath.py
|
{
"start": 3512,
"end": 3788
}
|
class ____(ConcreteTemplate):
cases = [
signature(types.boolean, types.int64),
signature(types.boolean, types.uint64),
signature(types.boolean, types.float32),
signature(types.boolean, types.float64),
]
@infer_global(math.modf)
|
Math_isnan
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/metrics/meta_metric_provider.py
|
{
"start": 550,
"end": 3256
}
|
class ____(MetaMetricProvider):
"""
Goals:
Instantiation of a deprecated class should raise a warning;
Subclassing of a deprecated class should raise a warning;
Support isinstance and issubclass checks.
"""
# TODO: <Alex>All logging/warning directives should be placed into a common module to be imported as needed.</Alex> # noqa: E501 # FIXME CoP
# deprecated-v0.13.12
warnings.simplefilter("default", category=DeprecationWarning)
# Arguments: True -- suppresses the warnings; False -- outputs the warnings (to stderr).
logging.captureWarnings(False)
def __new__(cls, name, bases, classdict, *args, **kwargs):
alias = classdict.get("_DeprecatedMetaMetricProvider__alias")
if alias is not None:
def new(cls, *args, **kwargs):
alias = cls._DeprecatedMetaMetricProvider__alias
if alias is not None:
# deprecated-v0.13.12
warnings.warn(
f"""{cls.__name__} has been renamed to {alias} -- the alias {cls.__name__} is \
deprecated as of v0.13.12 and will be removed in v0.16.
""", # noqa: E501 # FIXME CoP
DeprecationWarning,
stacklevel=2,
)
return alias(*args, **kwargs)
classdict["__new__"] = new
classdict["_DeprecatedMetaMetricProvider__alias"] = alias
fixed_bases = []
for b in bases:
alias = getattr(b, "_DeprecatedMetaMetricProvider__alias", None)
if alias is not None:
# deprecated-v0.13.12
warnings.warn(
f"""{b.__name__} has been renamed to {alias.__name__} -- the alias {b.__name__} is deprecated \
as of v0.13.12 and will be removed in v0.16.
""", # noqa: E501 # FIXME CoP
DeprecationWarning,
stacklevel=2,
)
# Avoid duplicate base classes.
b = alias or b # noqa: PLW2901 # FIXME CoP
if b not in fixed_bases:
fixed_bases.append(b)
fixed_bases = tuple(fixed_bases)
return super().__new__(cls, name, fixed_bases, classdict, *args, **kwargs)
def __instancecheck__(cls, instance): # type: ignore[explicit-override] # FIXME
return any(cls.__subclasscheck__(c) for c in (type(instance), instance.__class__))
def __subclasscheck__(cls, subclass): # type: ignore[explicit-override] # FIXME
if subclass is cls:
return True
else:
return issubclass(subclass, cls._DeprecatedMetaMetricProvider__alias)
|
DeprecatedMetaMetricProvider
|
python
|
apache__airflow
|
providers/google/tests/unit/google/suite/sensors/test_drive.py
|
{
"start": 1283,
"end": 2197
}
|
class ____:
@mock.patch("airflow.providers.google.suite.sensors.drive.GoogleDriveHook")
def test_should_pass_argument_to_hook(self, mock_hook):
task = GoogleDriveFileExistenceSensor(
task_id="task-id",
folder_id=TEST_FOLDER_ID,
file_name=TEST_FILE_NAME,
drive_id=TEST_DRIVE_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.exists.return_value = True
result = task.poke(mock.MagicMock())
assert result
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.exists.assert_called_once_with(
folder_id=TEST_FOLDER_ID, file_name=TEST_FILE_NAME, drive_id=TEST_DRIVE_ID
)
|
TestGoogleDriveFileSensor
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/aggregations/base_executor.py
|
{
"start": 1513,
"end": 21672
}
|
class ____(Generic[ConnectionType]):
def __init__(
self,
connection: ConnectionType,
name: str,
consistency_level: Optional[ConsistencyLevel],
tenant: Optional[str],
validate_arguments: bool,
) -> None:
self._connection = connection
self._name = name
self._tenant = tenant
self._consistency_level = consistency_level
self._grpc = _AggregateGRPC(
weaviate_version=connection._weaviate_version,
name=name,
tenant=tenant,
consistency_level=consistency_level,
validate_arguments=validate_arguments,
)
def _query(self) -> AggregateBuilder:
return AggregateBuilder(
self._name,
)
def _to_aggregate_result(
self, response: dict, metrics: Optional[List[_Metrics]]
) -> AggregateReturn:
try:
result: dict = response["data"]["Aggregate"][self._name][0]
return AggregateReturn(
properties=(
self.__parse_properties(result, metrics) if metrics is not None else {}
),
total_count=(result["meta"]["count"] if result.get("meta") is not None else None),
)
except KeyError as e:
raise ValueError(
f"There was an error accessing the {e} key when parsing the GraphQL response: {response}"
)
def _to_result(
self, is_groupby: bool, response: aggregate_pb2.AggregateReply
) -> Union[AggregateReturn, AggregateGroupByReturn]:
if not is_groupby:
return AggregateReturn(
properties={
aggregation.property: self.__parse_property_grpc(aggregation)
for aggregation in response.single_result.aggregations.aggregations
},
total_count=response.single_result.objects_count,
)
if is_groupby:
return AggregateGroupByReturn(
groups=[
AggregateGroup(
grouped_by=self.__parse_grouped_by_value(group.grouped_by),
properties={
aggregation.property: self.__parse_property_grpc(aggregation)
for aggregation in group.aggregations.aggregations
},
total_count=group.objects_count,
)
for group in response.grouped_results.groups
]
)
def __parse_grouped_by_value(
self, grouped_by: aggregate_pb2.AggregateReply.Group.GroupedBy
) -> GroupedBy:
value: Union[
str,
int,
float,
bool,
List[str],
List[int],
List[float],
List[bool],
GeoCoordinate,
None,
]
if grouped_by.HasField("text"):
value = grouped_by.text
elif grouped_by.HasField("int"):
value = grouped_by.int
elif grouped_by.HasField("number"):
value = grouped_by.number
elif grouped_by.HasField("boolean"):
value = grouped_by.boolean
elif grouped_by.HasField("texts"):
value = list(grouped_by.texts.values)
elif grouped_by.HasField("ints"):
value = list(grouped_by.ints.values)
elif grouped_by.HasField("numbers"):
value = list(grouped_by.numbers.values)
elif grouped_by.HasField("booleans"):
value = list(grouped_by.booleans.values)
elif grouped_by.HasField("geo"):
v = grouped_by.geo
value = GeoCoordinate(
latitude=v.latitude,
longitude=v.longitude,
)
else:
value = None
_Warnings.unknown_type_encountered(grouped_by.WhichOneof("value"))
return GroupedBy(prop=grouped_by.path[0], value=value)
def _to_group_by_result(
self, response: dict, metrics: Optional[List[_Metrics]]
) -> AggregateGroupByReturn:
try:
results: dict = response["data"]["Aggregate"][self._name]
return AggregateGroupByReturn(
groups=[
AggregateGroup(
grouped_by=GroupedBy(
prop=result["groupedBy"]["path"][0],
value=result["groupedBy"]["value"],
),
properties=(
self.__parse_properties(result, metrics) if metrics is not None else {}
),
total_count=(
result["meta"]["count"] if result.get("meta") is not None else None
),
)
for result in results
]
)
except KeyError as e:
raise ValueError(
f"There was an error accessing the {e} key when parsing the GraphQL response: {response}"
)
def __parse_properties(self, result: dict, metrics: List[_Metrics]) -> AProperties:
props: AProperties = {}
for metric in metrics:
if metric.property_name in result:
props[metric.property_name] = self.__parse_property_gql(
result[metric.property_name], metric
)
return props
@staticmethod
def __parse_property_gql(property_: dict, metric: _Metrics) -> AggregateResult:
if isinstance(metric, _MetricsText):
return AggregateText(
count=property_.get("count"),
top_occurrences=[
TopOccurrence(
count=cast(dict, top_occurrence).get("occurs"),
value=cast(dict, top_occurrence).get("value"),
)
for top_occurrence in property_.get("topOccurrences", [])
],
)
elif isinstance(metric, _MetricsInteger):
return AggregateInteger(
count=property_.get("count"),
maximum=property_.get("maximum"),
mean=property_.get("mean"),
median=property_.get("median"),
minimum=property_.get("minimum"),
mode=property_.get("mode"),
sum_=property_.get("sum"),
)
elif isinstance(metric, _MetricsNumber):
return AggregateNumber(
count=property_.get("count"),
maximum=property_.get("maximum"),
mean=property_.get("mean"),
median=property_.get("median"),
minimum=property_.get("minimum"),
mode=property_.get("mode"),
sum_=property_.get("sum"),
)
elif isinstance(metric, _MetricsBoolean):
return AggregateBoolean(
count=property_.get("count"),
percentage_false=property_.get("percentageFalse"),
percentage_true=property_.get("percentageTrue"),
total_false=property_.get("totalFalse"),
total_true=property_.get("totalTrue"),
)
elif isinstance(metric, _MetricsDate):
return AggregateDate(
count=property_.get("count"),
maximum=property_.get("maximum"),
median=property_.get("median"),
minimum=property_.get("minimum"),
mode=property_.get("mode"),
)
elif isinstance(metric, _MetricsReference):
return AggregateReference(pointing_to=property_.get("pointingTo"))
else:
raise ValueError(
f"Unknown aggregation type {metric} encountered in _Aggregate.__parse_property() for property {property_}"
)
@staticmethod
def __parse_property_grpc(
aggregation: aggregate_pb2.AggregateReply.Aggregations.Aggregation,
) -> AggregateResult:
if aggregation.HasField("text"):
return AggregateText(
count=aggregation.text.count,
top_occurrences=[
TopOccurrence(
count=top_occurrence.occurs,
value=top_occurrence.value,
)
for top_occurrence in aggregation.text.top_occurences.items
],
)
elif aggregation.HasField("int"):
return AggregateInteger(
count=aggregation.int.count,
maximum=aggregation.int.maximum,
mean=aggregation.int.mean,
median=aggregation.int.median,
minimum=aggregation.int.minimum,
mode=aggregation.int.mode,
sum_=aggregation.int.sum,
)
elif aggregation.HasField("number"):
return AggregateNumber(
count=aggregation.number.count,
maximum=aggregation.number.maximum,
mean=aggregation.number.mean,
median=aggregation.number.median,
minimum=aggregation.number.minimum,
mode=aggregation.number.mode,
sum_=aggregation.number.sum,
)
elif aggregation.HasField("boolean"):
return AggregateBoolean(
count=aggregation.boolean.count,
percentage_false=aggregation.boolean.percentage_false,
percentage_true=aggregation.boolean.percentage_true,
total_false=aggregation.boolean.total_false,
total_true=aggregation.boolean.total_true,
)
elif aggregation.HasField("date"):
return AggregateDate(
count=aggregation.date.count,
maximum=aggregation.date.maximum,
median=aggregation.date.median,
minimum=aggregation.date.minimum,
mode=aggregation.date.mode,
)
elif aggregation.HasField("reference"):
return AggregateReference(pointing_to=list(aggregation.reference.pointing_to))
else:
raise ValueError(
f"Unknown aggregation type {aggregation} encountered in _Aggregate.__parse_property_grpc()"
)
@staticmethod
def _add_groupby_to_builder(
builder: AggregateBuilder, group_by: Union[str, GroupByAggregate, None]
) -> AggregateBuilder:
_validate_input(_ValidateArgument([str, GroupByAggregate, None], "group_by", group_by))
if group_by is None:
return builder
if isinstance(group_by, str):
group_by = GroupByAggregate(prop=group_by)
builder = builder.with_group_by_filter([group_by.prop])
if group_by.limit is not None:
builder = builder.with_limit(group_by.limit)
return builder.with_fields(" groupedBy { path value } ")
def _base(
self,
return_metrics: Optional[List[_Metrics]],
filters: Optional[_Filters],
total_count: bool,
) -> AggregateBuilder:
_validate_input(
[
_ValidateArgument([List[_Metrics], None], "return_metrics", return_metrics),
_ValidateArgument([_Filters, None], "filters", filters),
_ValidateArgument([bool], "total_count", total_count),
]
)
builder = self._query()
if return_metrics is not None:
builder = builder.with_fields(" ".join([metric.to_gql() for metric in return_metrics]))
if filters is not None:
builder = builder.with_where(_FilterToREST.convert(filters))
if total_count:
builder = builder.with_meta_count()
if self._tenant is not None:
builder = builder.with_tenant(self._tenant)
return builder
def _do(self, query: AggregateBuilder) -> executor.Result[dict]:
def resp(res: Response) -> dict:
data = _decode_json_response_dict(res, "Query was not successful")
assert data is not None
if (errs := data.get("errors")) is not None:
if "Unexpected empty IN" in errs[0]["message"]:
raise WeaviateQueryError(
"The query that you sent had no body so GraphQL was unable to parse it. You must provide at least one option to the aggregation method in order to build a valid query.",
"GQL Aggregate",
)
raise WeaviateQueryError(
f"Error in GraphQL response: {json.dumps(errs, indent=2)}, for the following query: {query.build()}",
"GQL Aggregate",
)
return data
return executor.execute(
response_callback=resp,
method=self._connection.post,
path="/graphql",
weaviate_object={"query": query.build()},
)
@staticmethod
def _parse_near_options(
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
object_limit: Optional[int],
) -> None:
_validate_input(
[
_ValidateArgument([int, float, None], "certainty", certainty),
_ValidateArgument([int, float, None], "distance", distance),
_ValidateArgument([int, None], "object_limit", object_limit),
]
)
@staticmethod
def _add_hybrid_to_builder(
builder: AggregateBuilder,
query: Optional[str],
alpha: Optional[NUMBER],
vector: Optional[List[float]],
query_properties: Optional[List[str]],
object_limit: Optional[int],
target_vector: Optional[str],
max_vector_distance: Optional[NUMBER],
) -> AggregateBuilder:
payload: dict = {}
if query is not None:
payload["query"] = query
if alpha is not None:
payload["alpha"] = alpha
if vector is not None:
payload["vector"] = vector
if query_properties is not None:
payload["properties"] = query_properties
if target_vector is not None:
payload["targetVectors"] = [target_vector]
if max_vector_distance is not None:
payload["maxVectorDistance"] = max_vector_distance
builder = builder.with_hybrid(payload)
if object_limit is not None:
builder = builder.with_object_limit(object_limit)
return builder
@staticmethod
def _add_near_image_to_builder(
builder: AggregateBuilder,
near_image: Union[str, pathlib.Path, io.BufferedReader],
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
object_limit: Optional[int],
target_vector: Optional[str],
) -> AggregateBuilder:
if all([certainty is None, distance is None, object_limit is None]):
raise WeaviateInvalidInputError(
"You must provide at least one of the following arguments: certainty, distance, object_limit when vector searching"
)
_validate_input(
_ValidateArgument([str, pathlib.Path, io.BufferedReader], "near_image", near_image)
)
_BaseExecutor._parse_near_options(certainty, distance, object_limit)
payload: dict = {}
payload["image"] = parse_blob(near_image)
if certainty is not None:
payload["certainty"] = certainty
if distance is not None:
payload["distance"] = distance
if target_vector is not None:
payload["targetVector"] = target_vector
builder = builder.with_near_image(payload, encode=False)
if object_limit is not None:
builder = builder.with_object_limit(object_limit)
return builder
@staticmethod
def _add_near_object_to_builder(
builder: AggregateBuilder,
near_object: UUID,
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
object_limit: Optional[int],
target_vector: Optional[str],
) -> AggregateBuilder:
if all([certainty is None, distance is None, object_limit is None]):
raise WeaviateInvalidInputError(
"You must provide at least one of the following arguments: certainty, distance, object_limit when vector searching"
)
_validate_input(_ValidateArgument([UUID], "near_object", near_object))
_BaseExecutor._parse_near_options(certainty, distance, object_limit)
payload: dict = {}
payload["id"] = str(near_object)
if certainty is not None:
payload["certainty"] = certainty
if distance is not None:
payload["distance"] = distance
if target_vector is not None:
payload["targetVector"] = target_vector
builder = builder.with_near_object(payload)
if object_limit is not None:
builder = builder.with_object_limit(object_limit)
return builder
@staticmethod
def _add_near_text_to_builder(
builder: AggregateBuilder,
query: Union[List[str], str],
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
move_to: Optional[Move],
move_away: Optional[Move],
object_limit: Optional[int],
target_vector: Optional[str],
) -> AggregateBuilder:
if all([certainty is None, distance is None, object_limit is None]):
raise WeaviateInvalidInputError(
"You must provide at least one of the following arguments: certainty, distance, object_limit when vector searching"
)
_validate_input(
[
_ValidateArgument([List[str], str], "query", query),
_ValidateArgument([Move, None], "move_to", move_to),
_ValidateArgument([Move, None], "move_away", move_away),
_ValidateArgument([str, None], "target_vector", target_vector),
]
)
_BaseExecutor._parse_near_options(certainty, distance, object_limit)
payload: dict = {}
payload["concepts"] = query if isinstance(query, list) else [query]
if certainty is not None:
payload["certainty"] = certainty
if distance is not None:
payload["distance"] = distance
if move_to is not None:
payload["moveTo"] = move_to._to_gql_payload()
if move_away is not None:
payload["moveAwayFrom"] = move_away._to_gql_payload()
if target_vector is not None:
payload["targetVector"] = target_vector
builder = builder.with_near_text(payload)
if object_limit is not None:
builder = builder.with_object_limit(object_limit)
return builder
@staticmethod
def _add_near_vector_to_builder(
builder: AggregateBuilder,
near_vector: List[float],
certainty: Optional[NUMBER],
distance: Optional[NUMBER],
object_limit: Optional[int],
target_vector: Optional[str],
) -> AggregateBuilder:
if all([certainty is None, distance is None, object_limit is None]):
raise WeaviateInvalidInputError(
"You must provide at least one of the following arguments: certainty, distance, object_limit when vector searching"
)
_validate_input(_ValidateArgument([list], "near_vector", near_vector))
_BaseExecutor._parse_near_options(certainty, distance, object_limit)
payload: dict = {}
payload["vector"] = near_vector
if certainty is not None:
payload["certainty"] = certainty
if distance is not None:
payload["distance"] = distance
if target_vector is not None:
payload["targetVector"] = target_vector
builder = builder.with_near_vector(payload)
if object_limit is not None:
builder = builder.with_object_limit(object_limit)
return builder
|
_BaseExecutor
|
python
|
pypa__installer
|
src/installer/exceptions.py
|
{
"start": 41,
"end": 134
}
|
class ____(Exception):
"""All exceptions raised from this package's code."""
|
InstallerError
|
python
|
kamyu104__LeetCode-Solutions
|
Python/reward-top-k-students.py
|
{
"start": 139,
"end": 1912
}
|
class ____(object):
def topStudents(self, positive_feedback, negative_feedback, report, student_id, k):
"""
:type positive_feedback: List[str]
:type negative_feedback: List[str]
:type report: List[str]
:type student_id: List[int]
:type k: int
:rtype: List[int]
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
pos, neg = set(positive_feedback), set(negative_feedback)
arr = []
for i, r in itertools.izip(student_id, report):
score = sum(3 if w in pos else -1 if w in neg else 0 for w in r.split())
arr.append((-score, i))
nth_element(arr, k-1)
return [i for _, i in sorted(arr[:k])]
|
Solution
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/auth/managers/simple/simple_auth_manager.py
|
{
"start": 2159,
"end": 2934
}
|
class ____(namedtuple("SimpleAuthManagerRole", "name order"), Enum):
"""
List of pre-defined roles in simple auth manager.
The first attribute defines the name that references this role in the config.
The second attribute defines the order between roles. The role with order X means it grants access to
resources under its umbrella and all resources under the umbrella of roles of lower order
"""
# VIEWER role gives all read-only permissions
VIEWER = "VIEWER", 0
# USER role gives viewer role permissions + access to DAGs
USER = "USER", 1
# OP role gives user role permissions + access to connections, config, pools, variables
OP = "OP", 2
# ADMIN role gives all permissions
ADMIN = "ADMIN", 3
|
SimpleAuthManagerRole
|
python
|
huggingface__transformers
|
examples/modular-transformers/configuration_my_new_model2.py
|
{
"start": 724,
"end": 5345
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma-7B.
e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GemmaModel`]
```python
>>> from transformers import GemmaModel, GemmaConfig
>>> # Initializing a Gemma gemma-7b style configuration
>>> configuration = GemmaConfig()
>>> # Initializing a model from the gemma-7b style configuration
>>> model = GemmaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "my_new_model2"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `MyNewModel2Model`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
pretraining_tp: Optional[int] = 1,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
head_dim: Optional[int] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`
rope_scaling = kwargs.pop("rope_scaling", None)
self.rope_parameters = rope_scaling or rope_parameters
# Validate the correctness of rotary position embeddings parameters
rope_theta = kwargs.get("rope_theta", 10000.0)
standardize_rope_params(self, rope_theta=rope_theta)
rope_config_validation(self)
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
|
MyNewModel2Config
|
python
|
redis__redis-py
|
tests/test_asyncio/test_search.py
|
{
"start": 1535,
"end": 4206
}
|
class ____:
@pytest_asyncio.fixture()
async def decoded_r(self, create_redis, stack_url):
return await create_redis(decode_responses=True, url=stack_url)
@staticmethod
async def waitForIndex(env, idx, timeout=None):
delay = 0.1
while True:
try:
res = await env.execute_command("FT.INFO", idx)
if int(res[res.index("indexing") + 1]) == 0:
break
except ValueError:
break
except AttributeError:
try:
if int(res["indexing"]) == 0:
break
except ValueError:
break
except ResponseError:
# index doesn't exist yet
# continue to sleep and try again
pass
await asyncio.sleep(delay)
if timeout is not None:
timeout -= delay
if timeout <= 0:
break
@staticmethod
def getClient(decoded_r: redis.Redis):
"""
Gets a client client attached to an index name which is ready to be
created
"""
return decoded_r
@staticmethod
async def createIndex(decoded_r, num_docs=100, definition=None):
try:
await decoded_r.create_index(
(
TextField("play", weight=5.0),
TextField("txt"),
NumericField("chapter"),
),
definition=definition,
)
except redis.ResponseError:
await decoded_r.dropindex(delete_documents=True)
return await AsyncSearchTestsBase.createIndex(
decoded_r, num_docs=num_docs, definition=definition
)
chapters = {}
bzfp = TextIOWrapper(bz2.BZ2File(WILL_PLAY_TEXT), encoding="utf8")
r = csv.reader(bzfp, delimiter=";")
for n, line in enumerate(r):
play, chapter, _, text = line[1], line[2], line[4], line[5]
key = f"{play}:{chapter}".lower()
d = chapters.setdefault(key, {})
d["play"] = play
d["txt"] = d.get("txt", "") + " " + text
d["chapter"] = int(chapter or 0)
if len(chapters) == num_docs:
break
indexer = decoded_r.batch_indexer(chunk_size=50)
assert isinstance(indexer, AsyncSearch.BatchIndexer)
assert 50 == indexer.chunk_size
for key, doc in chapters.items():
await indexer.client.client.hset(key, mapping=doc)
await indexer.commit()
|
AsyncSearchTestsBase
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_validation.py
|
{
"start": 383,
"end": 15762
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str',
'message': 'str',
'message_expression': 'str',
'reason': 'str'
}
attribute_map = {
'expression': 'expression',
'message': 'message',
'message_expression': 'messageExpression',
'reason': 'reason'
}
def __init__(self, expression=None, message=None, message_expression=None, reason=None, local_vars_configuration=None): # noqa: E501
"""V1Validation - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self._message = None
self._message_expression = None
self._reason = None
self.discriminator = None
self.expression = expression
if message is not None:
self.message = message
if message_expression is not None:
self.message_expression = message_expression
if reason is not None:
self.reason = reason
@property
def expression(self):
"""Gets the expression of this V1Validation. # noqa: E501
Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"} - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"} - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"} Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. Required. # noqa: E501
:return: The expression of this V1Validation. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1Validation.
Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"} - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"} - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"} Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. Required. # noqa: E501
:param expression: The expression of this V1Validation. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
self._expression = expression
@property
def message(self):
"""Gets the message of this V1Validation. # noqa: E501
Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\". # noqa: E501
:return: The message of this V1Validation. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1Validation.
Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\". # noqa: E501
:param message: The message of this V1Validation. # noqa: E501
:type: str
"""
self._message = message
@property
def message_expression(self):
"""Gets the message_expression of this V1Validation. # noqa: E501
messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\" # noqa: E501
:return: The message_expression of this V1Validation. # noqa: E501
:rtype: str
"""
return self._message_expression
@message_expression.setter
def message_expression(self, message_expression):
"""Sets the message_expression of this V1Validation.
messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\" # noqa: E501
:param message_expression: The message_expression of this V1Validation. # noqa: E501
:type: str
"""
self._message_expression = message_expression
@property
def reason(self):
"""Gets the reason of this V1Validation. # noqa: E501
Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client. # noqa: E501
:return: The reason of this V1Validation. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1Validation.
Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client. # noqa: E501
:param reason: The reason of this V1Validation. # noqa: E501
:type: str
"""
self._reason = reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Validation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Validation):
return True
return self.to_dict() != other.to_dict()
|
V1Validation
|
python
|
getsentry__sentry
|
tests/acceptance/test_replay_list.py
|
{
"start": 338,
"end": 7039
}
|
class ____(ReplaysAcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band 1")
self.project = self.create_project(
organization=self.org,
teams=[self.team],
name="Bengal",
flags=Project.flags.has_replays,
)
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.login_as(self.user)
self.path = f"/organizations/{self.org.slug}/explore/replays/"
self.header_fields = [
"Replay",
"OS",
"Browser",
"Duration",
"Dead clicks",
"Rage clicks",
"Errors",
"Activity",
]
def assert_replay_table_renders(self) -> None:
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
self.browser.wait_until_not('[data-test-id="replay-table-loading"]')
assert not self.browser.element_exists_by_test_id("replay-table-errored")
def test_empty(self) -> None:
with self.feature(FEATURE_NAME):
self.browser.get(self.path)
self.assert_replay_table_renders()
rows = self.browser.elements('[data-test-id="replay-table"] [role="row"]')
assert len(rows) == 1
for field in self.header_fields:
assert field in rows[0].text
def test_simple(self) -> None:
seq1_timestamp = datetime.now() - timedelta(minutes=10, seconds=52)
seq2_timestamp = datetime.now() - timedelta(minutes=10, seconds=35)
replay_ids = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
for i, replay_id in enumerate(replay_ids):
self.store_replays(
[
mock_replay(
seq1_timestamp - timedelta(seconds=i * 10),
self.project.id,
replay_id,
segment_id=0,
urls=[
"http://localhost/",
"http://localhost/home/",
"http://localhost/profile/",
],
),
mock_replay(
seq2_timestamp - timedelta(seconds=i * 10),
self.project.id,
replay_id,
segment_id=1,
),
]
)
with self.feature(FEATURE_NAME):
self.browser.get(self.path)
self.assert_replay_table_renders()
rows = self.browser.elements('[data-test-id="replay-table"] [role="row"]')
assert len(rows) == 4
for field in self.header_fields:
assert field in rows[0].text
assert replay_ids[0][:8] in rows[1].text
assert replay_ids[1][:8] in rows[2].text
assert replay_ids[2][:8] in rows[3].text
def test_archived(self) -> None:
seq1_timestamp = datetime.now() - timedelta(minutes=10, seconds=52)
seq2_timestamp = datetime.now() - timedelta(minutes=10, seconds=35)
replay_id = uuid.uuid4().hex
self.store_replays(
[
mock_replay(
seq1_timestamp,
self.project.id,
replay_id,
),
mock_replay(
seq2_timestamp,
self.project.id,
replay_id,
is_archived=True,
),
]
)
with self.feature(FEATURE_NAME):
self.browser.get(self.path)
self.assert_replay_table_renders()
rows = self.browser.elements('[data-test-id="replay-table"] [role="row"]')
assert len(rows) == 2
for field in self.header_fields:
assert field in rows[0].text
assert replay_id[:8] in rows[1].text
assert "Deleted Replay" in rows[1].text
def test_viewed_indicator_has_viewed(self) -> None:
seq1_timestamp = datetime.now() - timedelta(minutes=10, seconds=52)
seq2_timestamp = datetime.now() - timedelta(minutes=10, seconds=35)
replay_id = uuid.uuid4().hex
self.store_replays(
[
mock_replay(
seq1_timestamp,
self.project.id,
replay_id,
),
mock_replay_viewed(
seq2_timestamp.timestamp(),
self.project.id,
replay_id,
self.user.id,
),
]
)
with self.feature(FEATURE_NAME):
self.browser.get(self.path)
self.assert_replay_table_renders()
rows = self.browser.elements('[data-test-id="replay-table"] [role="row"]')
for field in self.header_fields:
assert field in rows[0].text
assert replay_id[:8] in rows[1].text
assert not self.browser.element_exists(
'[data-test-id="replay-table"][role="row"][data-has-viewed="true"]'
)
def test_viewed_indicator_not_viewed(self) -> None:
seq1_timestamp = datetime.now() - timedelta(minutes=10, seconds=52)
seq2_timestamp = datetime.now() - timedelta(minutes=10, seconds=35)
replay_id = uuid.uuid4().hex
self.store_replays(
[
mock_replay(
seq1_timestamp,
self.project.id,
replay_id,
),
mock_replay(
seq2_timestamp,
self.project.id,
replay_id,
),
]
)
with self.feature(FEATURE_NAME):
self.browser.get(self.path)
self.assert_replay_table_renders()
rows = self.browser.elements('[data-test-id="replay-table"] [role="row"]')
for field in self.header_fields:
assert field in rows[0].text
assert replay_id[:8] in rows[1].text
assert not self.browser.element_exists(
'[data-test-id="replay-table"][role="row"][data-has-viewed="false"]'
)
|
ReplayListTest
|
python
|
ApeWorX__ape
|
src/ape/api/accounts.py
|
{
"start": 1335,
"end": 25693
}
|
class ____(BaseInterfaceModel, BaseAddress):
"""
An API class representing an account.
"""
def __dir__(self) -> list[str]:
"""
Display methods to IPython on ``a.[TAB]`` tab completion.
Returns:
list[str]: Method names that IPython uses for tab completion.
"""
base_value_excludes = ("code", "codesize", "is_contract") # Not needed for accounts
base_values = [v for v in self._base_dir_values if v not in base_value_excludes]
return base_values + [
self.__class__.alias.fget.__name__, # type: ignore[attr-defined]
self.__class__.call.__name__,
self.__class__.deploy.__name__,
self.__class__.prepare_transaction.__name__,
self.__class__.sign_authorization.__name__,
self.__class__.sign_message.__name__,
self.__class__.sign_transaction.__name__,
self.__class__.transfer.__name__,
self.__class__.delegate.fget.__name__, # type: ignore[attr-defined]
self.__class__.set_delegate.__name__,
self.__class__.remove_delegate.__name__,
self.__class__.delegate_to.__name__,
]
@property
def alias(self) -> Optional[str]:
"""
A shortened-name for quicker access to the account.
"""
return None
@property
def public_key(self) -> Optional["HexBytes"]:
"""
The public key for the account.
```{notice}
Account might not have this property if feature is unsupported or inaccessible.
```
"""
return None
def prepare_transaction(self, txn: "TransactionAPI", **kwargs) -> "TransactionAPI":
sign = kwargs.pop("sign", False)
prepared_tx = super().prepare_transaction(txn, **kwargs)
return (self.sign_transaction(prepared_tx) or prepared_tx) if sign else prepared_tx
def sign_raw_msghash(self, msghash: "HexBytes") -> Optional[MessageSignature]:
"""
Sign a raw message hash.
Args:
msghash (:class:`~eth_pydantic_types.HexBytes`):
The message hash to sign. Plugins may or may not support this operation.
Default implementation is to raise ``APINotImplementedError``.
Returns:
:class:`~ape.types.signatures.MessageSignature` (optional):
The signature corresponding to the message.
"""
raise APINotImplementedError(
f"Raw message signing is not supported by '{self.__class__.__name__}'"
)
def sign_authorization(
self,
address: Any,
chain_id: Optional[int] = None,
nonce: Optional[int] = None,
) -> Optional[MessageSignature]:
"""
Sign an `EIP-7702 <https://eips.ethereum.org/EIPS/eip-7702>`__ Authorization.
Args:
address (Any): A delegate address to sign the authorization for.
chain_id (Optional[int]):
The chain ID that the authorization should be valid for.
A value of ``0`` means that the authorization is valid for **any chain**.
Default tells implementation to use the currently connected network's ``chain_id``.
nonce (Optional[int]):
The nonce to use to sign authorization with. Defaults to account's current nonce.
Returns:
:class:`~ape.types.signatures.MessageSignature` (optional):
The signature corresponding to the message.
```{caution}
This action has the capability to be extremely destructive to the signer, and might lead to
full account compromise. All implementations are recommended to ensure that the signer be
made aware of the severity and impact of this action through some callout.
```
"""
raise APINotImplementedError(
f"Authorization signing is not supported by '{self.__class__.__name__}'"
)
@abstractmethod
def sign_message(self, msg: Any, **signer_options) -> Optional[MessageSignature]:
"""
Sign a message.
Args:
msg (Any): The message to sign. Account plugins can handle various types of messages.
For example, :class:`~ape_accounts.accounts.KeyfileAccount` can handle
:class:`~ape.types.signatures.SignableMessage`, str, int, and bytes.
See these
`docs <https://eth-account.readthedocs.io/en/stable/eth_account.html#eth_account.messages.SignableMessage>`__ # noqa: E501
for more type information on the :class:`~ape.types.signatures.SignableMessage` type.
**signer_options: Additional kwargs given to the signer to modify the signing operation.
Returns:
:class:`~ape.types.signatures.MessageSignature` (optional): The signature corresponding to the message.
"""
@abstractmethod
def sign_transaction(self, txn: TransactionAPI, **signer_options) -> Optional[TransactionAPI]:
"""
Sign a transaction.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`): The transaction to sign.
**signer_options: Additional kwargs given to the signer to modify the signing operation.
Returns:
:class:`~ape.api.transactions.TransactionAPI` (optional): A signed transaction.
The ``TransactionAPI`` returned by this method may not correspond to ``txn`` given as
input, however returning a properly-formatted transaction here is meant to be executed.
Returns ``None`` if the account does not have a transaction it wishes to execute.
"""
def call(
self,
txn: TransactionAPI,
send_everything: bool = False,
private: bool = False,
sign: bool = True,
**signer_options,
) -> ReceiptAPI:
"""
Make a transaction call.
Raises:
:class:`~ape.exceptions.AccountsError`: When the nonce is invalid or the sender does
not have enough funds.
:class:`~ape.exceptions.TransactionError`: When the required confirmations are negative.
:class:`~ape.exceptions.SignatureError`: When the user does not sign the transaction.
:class:`~ape.exceptions.APINotImplementedError`: When setting ``private=True`` and using
a provider that does not support private transactions.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`): An invoke-transaction.
send_everything (bool): ``True`` will send the difference from balance and fee.
Defaults to ``False``.
private (bool): ``True`` will use the
:meth:`~ape.api.providers.ProviderAPI.send_private_transaction` method.
sign (bool): ``False`` to not sign the transaction (useful for providers like Titanoboa
which still use a sender but don't need to sign).
**signer_options: Additional kwargs given to the signer to modify the signing operation.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`
"""
txn = self.prepare_transaction(txn)
max_fee = txn.max_fee
gas_limit = txn.gas_limit
if not isinstance(gas_limit, int):
raise TransactionError("Transaction not prepared.")
# The conditions below should never reached but are here for mypy's sake.
# The `max_fee` was either set manually or from `prepare_transaction()`.
# The `gas_limit` was either set manually or from `prepare_transaction()`.
if max_fee is None:
raise TransactionError("`max_fee` failed to get set in transaction preparation.")
elif gas_limit is None:
raise TransactionError("`gas_limit` failed to get set in transaction preparation.")
total_fees = max_fee * gas_limit
# Send the whole balance.
if send_everything:
amount_to_send = self.balance - total_fees
if amount_to_send <= 0:
raise AccountsError(
f"Sender does not have enough to cover transaction value and gas: {total_fees}"
)
else:
txn.value = amount_to_send
if sign:
prepared_txn = self.sign_transaction(txn, **signer_options)
if not prepared_txn:
raise SignatureError("The transaction was not signed.", transaction=txn)
else:
prepared_txn = txn
if not prepared_txn.sender:
prepared_txn.sender = self.address
return (
self.provider.send_private_transaction(prepared_txn)
if private
else self.provider.send_transaction(prepared_txn)
)
def transfer(
self,
account: Union[str, AddressType, BaseAddress],
value: Optional[Union[str, int]] = None,
data: Optional[Union[bytes, str]] = None,
private: bool = False,
**kwargs,
) -> ReceiptAPI:
"""
Send funds to an account.
Raises:
:class:`~ape.exceptions.APINotImplementedError`: When setting ``private=True``
and using a provider that does not support private transactions.
Args:
account (Union[str, AddressType, BaseAddress]): The receiver of the funds.
value (Optional[Union[str, int]]): The amount to send.
data (Optional[Union[bytes, str]]): Extra data to include in the transaction.
private (bool): ``True`` asks the provider to make the transaction
private. For example, EVM providers typically use the RPC
``eth_sendPrivateTransaction`` to achieve this. Local providers may ignore
this value.
**kwargs: Additional transaction kwargs passed to
:meth:`~ape.api.networks.EcosystemAPI.create_transaction`, such as ``gas``
``max_fee``, or ``max_priority_fee``. For a list of available transaction
kwargs, see :class:`~ape.api.transactions.TransactionAPI`.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`
"""
if isinstance(account, int):
raise AccountsError(
"Cannot use integer-type for the `receiver` argument in the "
"`.transfer()` method (this protects against accidentally passing "
"the `value` as the `receiver`)."
)
try:
receiver = self.conversion_manager.convert(account, AddressType)
except ConversionError as err:
raise AccountsError(f"Invalid `receiver` value: '{account}'.") from err
txn = self.provider.network.ecosystem.create_transaction(
sender=self.address, receiver=receiver, **kwargs
)
if data:
txn.data = self.conversion_manager.convert(data, bytes)
if value is None and not kwargs.get("send_everything"):
raise AccountsError("Must provide 'VALUE' or use 'send_everything=True'")
elif value is not None and kwargs.get("send_everything"):
raise AccountsError("Cannot use 'send_everything=True' with 'VALUE'.")
elif value is not None:
txn.value = self.conversion_manager.convert(value, int)
if txn.value < 0:
raise AccountsError("Value cannot be negative.")
return self.call(txn, private=private, **kwargs)
def deploy(
self, contract: "ContractContainer", *args, publish: bool = False, **kwargs
) -> "ContractInstance":
"""
Create a smart contract on the blockchain. The smart contract must compile before
deploying and a provider must be active.
Args:
contract (:class:`~ape.contracts.base.ContractContainer`): The type of contract to
deploy.
publish (bool): Set to ``True`` to attempt explorer contract verification.
Defaults to ``False``.
Returns:
:class:`~ape.contracts.ContractInstance`: An instance of the deployed contract.
"""
possible_address = self.get_deployment_address()
styled_address = click.style(possible_address, bold=True)
logger.info(f"Contract will be deployed at: {styled_address}")
from ape.contracts import ContractContainer
if isinstance(contract, ContractType):
# Hack to allow deploying ContractTypes w/o being
# wrapped in a container first.
contract = ContractContainer(contract)
# NOTE: It is important to type check here to prevent cases where user
# may accidentally pass in a ContractInstance, which has a very
# different implementation for __call__ than ContractContainer.
elif not isinstance(contract, ContractContainer):
raise TypeError(
"contract argument must be a ContractContainer type, "
"such as 'project.MyContract' where 'MyContract' is the name of "
"a contract in your project."
)
bytecode = contract.contract_type.deployment_bytecode
if not bytecode or bytecode.bytecode in (None, "", "0x"):
raise MissingDeploymentBytecodeError(contract.contract_type)
txn = contract(*args, **kwargs)
if kwargs.get("value") and not contract.contract_type.constructor.is_payable:
raise MethodNonPayableError("Sending funds to a non-payable constructor.")
txn.sender = self.address
receipt = contract._cache_wrap(lambda: self.call(txn, **kwargs))
if not (address := receipt.contract_address):
raise AccountsError(f"'{receipt.txn_hash}' did not create a contract.")
contract_type = contract.contract_type
styled_address = click.style(receipt.contract_address, bold=True)
contract_name = contract_type.name or "<Unnamed Contract>"
logger.success(f"Contract '{contract_name}' deployed to: {styled_address}")
instance = self.chain_manager.contracts.instance_from_receipt(receipt, contract_type)
self.chain_manager.contracts.cache_deployment(instance)
if publish:
self.local_project.deployments.track(instance)
self.provider.network.publish_contract(address)
instance.base_path = contract.base_path or self.local_project.path
return instance
def declare(self, contract: "ContractContainer", *args, **kwargs) -> ReceiptAPI:
"""
Deploy the "blueprint" of a contract type. For EVM providers, this likely means
using `EIP-5202 <https://eips.ethereum.org/EIPS/eip-5202>`__, which is implemented
in the core ``ape-ethereum`` plugin.
Args:
contract (:class:`~ape.contracts.base.ContractContainer`): The contract container
to declare.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`: The receipt of the declare transaction.
"""
transaction = self.provider.network.ecosystem.encode_contract_blueprint(
contract.contract_type, *args, **kwargs
)
receipt = self.call(transaction)
if receipt.contract_address:
self.chain_manager.contracts.cache_blueprint(
receipt.contract_address, contract.contract_type
)
else:
logger.debug("Failed to cache contract declaration: missing contract address.")
return receipt
def check_signature(
self,
data: Union[SignableMessage, TransactionAPI, str, EIP712Message, int, bytes],
signature: Optional[MessageSignature] = None, # TransactionAPI doesn't need it
recover_using_eip191: bool = True,
) -> bool:
"""
Verify a message or transaction was signed by this account.
Args:
data (Union[:class:`~ape.types.signatures.SignableMessage`, :class:`~ape.api.transactions.TransactionAPI`]): # noqa: E501
The message or transaction to verify.
signature (Optional[:class:`~ape.types.signatures.MessageSignature`]):
The signature to check. Defaults to ``None`` and is not needed when the first
argument is a transaction class.
recover_using_eip191 (bool):
Perform recovery using EIP-191 signed message check. If set False, then will attempt
recovery as raw hash. `data`` must be a 32 byte hash if this is set False.
Defaults to ``True``.
Returns:
bool: ``True`` if the data was signed by this account. ``False`` otherwise.
"""
if isinstance(data, str):
data = encode_defunct(text=data)
elif isinstance(data, int):
data = encode_defunct(hexstr=to_hex(data))
elif isinstance(data, bytes) and (len(data) != 32 or recover_using_eip191):
data = encode_defunct(data)
elif isinstance(data, EIP712Message):
data = data.signable_message
if isinstance(data, (SignableMessage, EIP712SignableMessage)):
if signature:
return self.address == Account.recover_message(data, vrs=signature)
else:
raise AccountsError(
"Parameter 'signature' required when verifying a 'SignableMessage'."
)
elif isinstance(data, TransactionAPI):
return self.address == Account.recover_transaction(data.serialize_transaction())
elif isinstance(data, bytes) and len(data) == 32 and not recover_using_eip191:
return self.address == Account._recover_hash(data, vrs=signature)
else:
raise AccountsError(f"Unsupported message type: {type(data)}.")
def get_deployment_address(self, nonce: Optional[int] = None) -> AddressType:
"""
Get a contract address before it is deployed. This is useful
when you need to pass the contract address to another contract
before deploying it.
Args:
nonce (int | None): Optionally provide a nonce. Defaults
the account's current nonce.
Returns:
AddressType: The contract address.
"""
# Use the connected network, if available. Else, default to Ethereum.
ecosystem = (
self.network_manager.active_provider.network.ecosystem
if self.network_manager.active_provider
else self.network_manager.ethereum
)
nonce = self.nonce if nonce is None else nonce
return ecosystem.get_deployment_address(self.address, nonce)
def set_delegate(self, contract: Union[BaseAddress, AddressType, str], **txn_kwargs):
"""
Have the account class override the value of its ``delegate``. For plugins that support
this feature, the way they choose to handle it can vary. For example, it could be a call to
upgrade itself using some built-in method for a smart wallet (with default txn args) e.g.
the Safe smart wallet (https://github.com/ApeWorX/ape-safe), or it could be to use an EIP-
7702-like feature available on the network to set a delegate for that account. However if a
plugin chooses to handle it, the resulting action (if successful) should make sure that the
value that ``self.delegate`` returns is the same as ``contract`` after it is completed.
By default, this method raises ``APINotImplementedError`` signaling that support is not
available for this feature. Calling this may result in other errors if implemented.
Args:
contract (`:class:~ape.contracts.ContractInstance`):
The contract instance to override the delegate with.
**txn_kwargs: Additional transaction kwargs passed to
:meth:`~ape.api.networks.EcosystemAPI.create_transaction`, such as ``gas``
``max_fee``, or ``max_priority_fee``. For a list of available transaction
kwargs, see :class:`~ape.api.transactions.TransactionAPI`.
"""
raise APINotImplementedError
def remove_delegate(self, **txn_kwargs):
"""
Has the account class remove the override for the value of its ``delegate``. For plugins
that support this feature, the way they choose to handle it can vary. For example, on a
network using an EIP7702-like feature available it will reset the delegate to empty.
However, if a plugin chooses to handle it, the resulting action (if successful) should
make sure that the value that ``self.delegate`` returns ``None`` after it is completed.
By default, this method raises ``APINotImplementedError`` signaling that support is not
available for this feature. Calling this may result in other errors if implemented.
Args:
**txn_kwargs: Additional transaction kwargs passed to
:meth:`~ape.api.networks.EcosystemAPI.create_transaction`, such as ``gas``
``max_fee``, or ``max_priority_fee``. For a list of available transaction
kwargs, see :class:`~ape.api.transactions.TransactionAPI`.
"""
raise APINotImplementedError
@contextmanager
def delegate_to(
self,
new_delegate: Union[BaseAddress, AddressType, str],
set_txn_kwargs: Optional[dict] = None,
reset_txn_kwargs: Optional[dict] = None,
**txn_kwargs,
) -> Iterator[BaseAddress]:
"""
Temporarily override the value of ``delegate`` for the account inside of a context manager,
and yields a contract instance object whose interface matches that of ``new_delegate``.
This is useful for ensuring that delegation is only temporarily extended to an account when
doing a critical action temporarily, such as using an EIP7702 delegate module.
Args:
new_delegate (`:class:~ape.contracts.ContractInstance`):
The contract instance to override the `delegate` with.
set_txn_kwargs (dict | None): Additional transaction kwargs passed to
:meth:`~ape.api.networks.EcosystemAPI.create_transaction` for the
:meth:`AccountAPI.set_delegate` method, such as ``gas``, ``max_fee``, or
``max_priority_fee``. Overrides the values provided via ``txn_kwargs``. For a list of
available transaction kwargs, see :class:`~ape.api.transactions.TransactionAPI`.
reset_txn_kwargs (dict | None): Additional transaction kwargs passed to
:meth:`~ape.api.networks.EcosystemAPI.create_transaction` for the
:meth:`AccountAPI.remove_delegate` method, such as ``gas``, ``max_fee``, or
``max_priority_fee``. Overrides the values provided via ``txn_kwargs``. For a list of
available transaction kwargs, see :class:`~ape.api.transactions.TransactionAPI`.
**txn_kwargs: Additional transaction kwargs passed to
:meth:`~ape.api.networks.EcosystemAPI.create_transaction`, such as ``gas``
``max_fee``, or ``max_priority_fee``. For a list of available transaction
kwargs, see :class:`~ape.api.transactions.TransactionAPI`.
Returns:
`:class:~ape.contracts.ContractInstance`:
The contract instance of this account with the interface of `contract`.
"""
set_txn_kwargs = {**txn_kwargs, **(set_txn_kwargs or {})}
existing_delegate = self.delegate
self.set_delegate(new_delegate, **set_txn_kwargs)
# NOTE: Do not cache this type as it is temporary
from ape.contracts import ContractInstance
# This is helpful for using it immediately to send things as self
with self.account_manager.use_sender(self):
if isinstance(new_delegate, ContractInstance):
# NOTE: Do not cache this
yield ContractInstance(self.address, contract_type=new_delegate.contract_type)
else:
yield self
reset_txn_kwargs = {**txn_kwargs, **(reset_txn_kwargs or {})}
if existing_delegate:
self.set_delegate(existing_delegate, **reset_txn_kwargs)
else:
self.remove_delegate(**reset_txn_kwargs)
|
AccountAPI
|
python
|
walkccc__LeetCode
|
solutions/40. Combination Sum II/40.py
|
{
"start": 0,
"end": 583
}
|
class ____:
def combinationSum2(self, candidates: list[int],
target: int) -> list[list[int]]:
ans = []
def dfs(s: int, target: int, path: list[int]) -> None:
if target < 0:
return
if target == 0:
ans.append(path.copy())
return
for i in range(s, len(candidates)):
if i > s and candidates[i] == candidates[i - 1]:
continue
path.append(candidates[i])
dfs(i + 1, target - candidates[i], path)
path.pop()
candidates.sort()
dfs(0, target, [])
return ans
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/types.py
|
{
"start": 3674,
"end": 4136
}
|
class ____:
"""Represents the data of a dbt Cloud workspace, given a project and environment."""
project_id: int
environment_id: int
# The ID of the ad hoc dbt Cloud job created by Dagster.
# This job is used to parse the dbt Cloud project.
# This job is also used to kick off cli invocation if no job ID is specified by users.
adhoc_job_id: int
manifest: Mapping[str, Any]
jobs: Sequence[Mapping[str, Any]]
|
DbtCloudWorkspaceData
|
python
|
walkccc__LeetCode
|
solutions/1722. Minimize Hamming Distance After Swap Operations/1722.py
|
{
"start": 514,
"end": 1181
}
|
class ____:
def minimumHammingDistance(
self,
source: list[int],
target: list[int],
allowedSwaps: list[list[int]],
) -> int:
n = len(source)
ans = 0
uf = UnionFind(n)
groupIdToCount = [collections.Counter() for _ in range(n)]
for a, b in allowedSwaps:
uf.unionByRank(a, b)
for i in range(n):
groupIdToCount[uf.find(i)][source[i]] += 1
for i in range(n):
groupId = uf.find(i)
count = groupIdToCount[groupId]
if target[i] not in count:
ans += 1
else:
count[target[i]] -= 1
if count[target[i]] == 0:
del count[target[i]]
return ans
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/552. Student Attendance Record II/552.py
|
{
"start": 0,
"end": 750
}
|
class ____:
def checkRecord(self, n: int) -> int:
MOD = 1_000_000_007
# dp[i][j] := the length so far with i A's and the last letters are j L's
dp = [[0] * 3 for _ in range(2)]
dp[0][0] = 1
for _ in range(n):
prev = [A[:] for A in dp]
# Append a P.
dp[0][0] = (prev[0][0] + prev[0][1] + prev[0][2]) % MOD
# Append an L.
dp[0][1] = prev[0][0]
# Append an L.
dp[0][2] = prev[0][1]
# Append an A or append a P.
dp[1][0] = (prev[0][0] + prev[0][1] + prev[0][2] +
prev[1][0] + prev[1][1] + prev[1][2]) % MOD
# Append an L.
dp[1][1] = prev[1][0]
# Append an L.
dp[1][2] = prev[1][1]
return (sum(dp[0]) + sum(dp[1])) % MOD
|
Solution
|
python
|
pytorch__pytorch
|
test/dynamo/test_exceptions.py
|
{
"start": 920,
"end": 27316
}
|
class ____(torch._dynamo.test_case.TestCase):
def test_exception(self):
def fn(x):
x = torch.cos(x)
try:
x = torch.sin(x)
raise NotImplementedError
except Exception:
x = torch.sigmoid(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_exception2(self):
def fn(x):
x = torch.cos(x)
try:
x = torch.sin(x)
raise NotImplementedError
except (NotImplementedError, AttributeError):
x = torch.sigmoid(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_exception3(self):
def fn(x):
x = torch.cos(x)
try:
x = torch.sin(x)
raise NotImplementedError("Not implemented")
except AssertionError:
x = torch.sigmoid(x)
except NotImplementedError:
x = torch.cos(x)
finally:
x = torch.cos(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_exception4(self):
def fn(x):
for i in range(10):
if i == 5:
return x
try:
x = torch.sin(x)
raise NotImplementedError
except Exception:
x = torch.sigmoid(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_exception_with_another_exception(self):
def fn(x):
x = torch.cos(x)
try:
x = torch.sin(x)
raise NotImplementedError("Not implemented")
except NotImplementedError:
x = torch.sigmoid(x)
try:
x = torch.cos(x)
raise AssertionError # noqa: B904
except AssertionError:
x = torch.cos(x)
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_exception_with_vars(self):
def fn(x):
try:
vars(42)
raise RuntimeError("Should not be raised")
except TypeError:
return x.sin()
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_autocast_with_exception(self):
class Optimizer(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
raise NotImplementedError("Not implemented")
@staticmethod
def backward(ctx, grad_out):
return grad_out
@torch.compile
def f(x: torch.Tensor):
try:
with torch.autocast(device_type="cpu", dtype=None):
Optimizer.apply(x)
except NotImplementedError:
return x + 1
inp = torch.ones(3)
out = f(inp)
self.assertTrue(torch.equal(out, inp + 1))
@make_dynamo_test
def test_isinstance_CustomException(self):
assert isinstance(CustomException, type)
assert not isinstance(CustomException(), type)
C = CustomExceptionWithInstanceCheck
assert isinstance(C, C)
assert isinstance(C(), C)
@make_dynamo_test
def test_propagate_exception_inside_ctx_manager(self):
@contextlib.contextmanager
def cm():
try:
yield
except BaseException: # noqa: B036
raise ValueError # noqa: B904
@contextlib.contextmanager
def nothing():
try:
yield
finally:
pass
z = 0
with nothing():
try:
with cm():
raise IndexError
except ValueError:
z = 1
except IndexError:
z = 2
assert z == 1
def test_exception_else(self):
def gn(x):
return torch.cos(x)
def fn(x):
x = torch.cos(x)
try:
x = torch.sin(x)
x = gn(x)
except Exception:
x = torch.sigmoid(x)
else:
x = torch.cos(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
@make_dynamo_test
def test_raise_match(self):
a = AttributeError
b = BytesWarning
c = ConnectionError
d = DeprecationWarning
e = Exception
def fn(a, b):
try:
raise a
finally:
raise b
def fix_exc_context(frame_exc, new_exc, old_exc):
# slightly change from ExitStack.fix_exc_context function
while 1:
exc_context = new_exc.__context__
if exc_context is None or exc_context is old_exc:
return
if exc_context is frame_exc:
break
new_exc = exc_context
new_exc.__context__ = old_exc
@contextlib.contextmanager
def ctx():
try:
yield
finally:
frame_exc = prev_exc = sys.exc_info()
args = [(d, c), (b, a)]
for x, y in args:
try:
fn(x, y)
except BaseException: # noqa: B036
new_exc = sys.exc_info()
fix_exc_context(frame_exc[1], new_exc[1], prev_exc[1])
prev_exc = new_exc
try:
fixed_ctx = prev_exc[1].__context__
raise prev_exc[1]
except BaseException: # noqa: B036
prev_exc[1].__context__ = fixed_ctx
raise
try:
with ctx():
raise e
except Exception as exc:
assert isinstance(exc, a)
assert isinstance(exc.__context__, b)
assert isinstance(exc.__context__.__context__, c)
assert isinstance(exc.__context__.__context__.__context__, d)
assert isinstance(exc.__context__.__context__.__context__.__context__, e)
# TODO(anijain2305) - does not work with fullgraph=True
def test_exception_with_another_exception2(self):
def gn(x):
try:
x = torch.cos(x)
raise NotImplementedError("Not implemented")
except NotImplementedError:
x = torch.sigmoid(x)
raise
def fn(x):
try:
x = torch.cos(x)
gn(x)
except Exception:
pass
return x
x = torch.randn(4)
fn(x)
# Can't use fullgraph=True because RERAISE is not supported
opt_fn = torch.compile(fn, backend="eager")
opt_fn(x)
def test_exception_with_ctx_manager(self):
def fn(x):
x = torch.cos(x)
try:
with torch.no_grad():
x = torch.sin(x)
raise NotImplementedError("Not implemented")
except NotImplementedError:
x = torch.sigmoid(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_exception_raised_from_child(self):
def gn():
raise NotImplementedError("foo")
def fn(x):
x = torch.cos(x)
try:
x = torch.sin(x)
gn()
x = torch.sin(x)
except Exception:
x = torch.sigmoid(x)
return x
x = torch.randn(4)
ref = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_dynamo_undo_kw_names(self):
def g(x, k=None):
if k:
raise TypeError("error")
return x.sin()
def fn(x):
d = {"a": x}
try:
g(x, k=True)
except Exception:
y = 0
for _, b in d.items(): # noqa: PERF102
y += b.sum()
return y
x = torch.randn(2, 3)
expected = fn(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
got = opt_fn(x)
self.assertEqual(expected, got)
def test_raise_custom_exception(self):
class Exc(Exception):
pass
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
try:
raise Exc
except Exc:
return t.sin()
except Exception:
return t.cos()
t = torch.randn(2)
y = fn(t)
self.assertEqual(y, t.sin())
def test_raise_custom_exception_with_args(self):
class Exc(Exception):
pass
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
try:
raise Exc(1, 2.0)
except Exc as e:
return t.sin() + e.args[0] + e.args[1]
except Exception:
return t.cos()
t = torch.randn(2)
y = fn(t)
self.assertEqual(y, t.sin() + 1 + 2.0)
def test_nn_module_getattr(self):
class A:
def __init__(self) -> None:
self._b = 20
def __getattr__(self, name):
fixed_name = "_" + name
if fixed_name in self.__dict__:
return self.__dict__[fixed_name]
raise AttributeError(f"{name} absent")
class B(A):
def __init__(self) -> None:
self.a = 10
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return 30
obj = B()
def fn(x):
return x * obj.a * obj.b * obj.c
x = torch.ones(4)
ref = fn(x)
print(ref)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
@torch._dynamo.config.patch(inline_inbuilt_nn_modules=True)
def test_custom_getattr_on_module_exception(self):
class Foo(torch.nn.Module):
def __init__(self, a=3):
super().__init__()
self.register_parameter("a", torch.nn.Parameter(torch.ones(4) * 2))
def __getattr__(self, name):
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "a_copy":
return self.a
raise
def forward(self, x):
return x * self.a * self.a_copy
mod = Foo()
opt_mod = torch.compile(mod, backend="eager", fullgraph=True)
x = torch.ones(4)
self.assertEqual(mod(x), opt_mod(x))
def test_attribute_error_from_getattr(self):
class Mock:
def __init__(self):
self.a = 5
def __getattr__(self, name):
if name != "a":
raise AttributeError("missing")
return self.__dict__["a"]
mock = Mock()
def fn(x):
if hasattr(mock, "b"):
return torch.cos(x)
return torch.sin(x)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(4)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_stop_iteration(self):
def zip_longest(*iterables, fillvalue=None):
# Get the iterators for each iterable
iterators = [iter(it) for it in iterables]
result = []
while True:
for it in iterators:
try:
value = next(it)
except StopIteration:
result.append(fillvalue)
return result
result.append(value)
def fn(x, y):
torch.cos(torch.randn(4))
return tuple(zip_longest(x, y))
x = [1, 2, 3, 4]
y = [10, 11, 12]
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
ref = fn(x, y)
res = opt_fn(x, y)
self.assertEqual(ref, res)
def test_nn_reraise(self):
class M(torch.nn.Module):
def forward(self, x):
raise ValueError("woof")
return x + 2
m = M()
m.register_forward_pre_hook(lambda m, go: None)
torch._dynamo.utils.clear_compilation_metrics()
opt_call = torch.compile(lambda x: m(x), backend="eager")
self.assertRaises(ValueError, lambda: opt_call(torch.randn(3)))
metrics = torch._dynamo.utils.get_compilation_metrics()
self.assertIn("Observed exception", metrics[0].fail_reason)
def test_key_error(self):
def fn(x, d):
try:
a = d["b"]
except KeyError:
a = 2
return x * a
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(4)
d = {"a": 1}
ref = fn(x, d)
res = opt_fn(x, d)
self.assertEqual(ref, res)
def test_atrribute_error(self):
class Mock:
def __init__(self):
self.a = 1
mock = Mock()
def fn(x):
try:
c = 2
mock.b
except AttributeError:
c = 3
return torch.sin(x) * c
opt_fn = torch.compile(fn, backend="eager")
x = torch.randn(4)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_raise_from_None(self):
# Inspired from os.environ
class MyMapping:
def __init__(self, d):
self._d = d
def __getitem__(self, key):
try:
value = self._d[key]
except KeyError:
raise KeyError(key) from None
return value
d = MyMapping({"a": 10, "b": 20})
def mapping_get(obj, key, value=None):
try:
return obj.__getitem__(key)
except KeyError:
return value
def fn(x, d, key):
x = torch.sin(x + 1)
return x, mapping_get(d, key)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.rand(2, 3)
ref = fn(x, d, "m")
res = opt_fn(x, d, "m")
self.assertEqual(ref[0], res[0])
self.assertEqual(ref[1], res[1])
@make_dynamo_test
def test_raise_from_None_2(self):
def fn():
try:
raise ValueError
except Exception:
raise TypeError from None
try:
fn()
except TypeError as e:
assert e.__cause__ is None
assert e.__suppress_context__ is True
@make_dynamo_test
def test_raise_from_other(self):
def fn():
try:
raise ValueError
except Exception as e:
raise TypeError from e
try:
fn()
except TypeError as e:
assert isinstance(e.__cause__, ValueError)
assert e.__suppress_context__ is True
@make_dynamo_test
def test_reraise_first_exc(self):
def fn():
try:
raise ZeroDivisionError
except ZeroDivisionError:
try:
raise ValueError # noqa: B904
except ValueError:
pass
raise
try:
fn()
except ZeroDivisionError:
pass
assert sys.exc_info()[0] is None
@make_dynamo_test
def test_ensure_exception_is_active_after_try_except_block(self):
try:
try:
raise ZeroDivisionError
except ZeroDivisionError:
for exc in (KeyError, IndexError):
try:
raise exc
except exc:
pass
raise
except ZeroDivisionError:
pass
assert sys.exc_info()[0] is None
@make_dynamo_test
def test_ensure_exception_is_active_inside_try_except_block(self):
try:
try:
raise ZeroDivisionError
except ZeroDivisionError:
for exc in (KeyError, IndexError):
try:
raise exc
except exc as e:
assert isinstance(e.__context__, ZeroDivisionError)
raise
except ZeroDivisionError:
pass
assert sys.exc_info()[0] is None
@make_dynamo_test
def test_handle_all_exceptions(self):
def cm():
try:
yield 1
except ValueError:
try:
raise TypeError # noqa: B904
finally:
pass
try:
gen = cm()
next(gen)
gen.throw(ValueError)
except TypeError:
pass
assert sys.exc_info()[0] is None
@make_dynamo_test
def test_reraise(self):
try:
try:
raise ValueError
except ValueError: # noqa: TRY203
raise
except ValueError:
pass
assert sys.exc_info()[0] is None
@make_dynamo_test
def test_raise_finally_simple(self):
def fn():
try:
raise ValueError
except ValueError:
try:
raise TypeError # noqa: B904
finally:
pass
try:
fn()
except TypeError:
pass
assert sys.exc_info()[0] is None
def test_reconstruct___context__(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
v = ValueError(1, 2, 3)
v.__context__ = TypeError()
v.__cause__ = RuntimeError()
return t.sin(), v
t = torch.randn(2)
y, v = fn(t)
self.assertEqual(y, t.sin())
self.assertIsInstance(v, ValueError)
self.assertIsInstance(v.__context__, TypeError)
self.assertIsInstance(v.__cause__, RuntimeError)
self.assertTrue(v.__suppress_context__)
def test_reconstruct_exception_2(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
try:
raise ValueError(1, 2, 3)
except Exception:
try:
raise TypeError(4, 5) from None
except Exception as e:
e.__cause__ = RuntimeError(6, 7)
return t.sin(), e
t = torch.randn(2)
y, v = fn(t)
self.assertEqual(y, t.sin())
self.assertIsInstance(v, TypeError)
self.assertIsInstance(v.__context__, ValueError)
self.assertIsInstance(v.__cause__, RuntimeError)
def test_raise_GeneratorExit(self):
# GeneratorExit does not inherit from Exception
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
try:
raise GeneratorExit
except Exception:
return t.sin()
except BaseException: # noqa: B036
return t.cos()
t = torch.randn(2)
y = fn(t)
self.assertEqual(y, t.cos())
def test_speculation_exception(self):
log = SpeculationLog()
log.next("fake", 555, "fake", Instruction(1, "fake", 1, 1))
log.restart()
with self.assertRaises(SpeculationLogDivergence):
log.next("bad", 58, "bad", Instruction(2, "different", 2, 2))
def test_dict_pop(self):
# Pattern from inspect.bind
def fn(dt, x):
try:
dt.pop("b")
except KeyError:
return torch.sin(x)
else:
return torch.cos(x)
d = {"a": 1}
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(4)
self.assertEqual(fn(d, x), opt_fn(d, x))
self.assertEqual(fn({"a": 1, "b": 2}, x), opt_fn({"a": 1, "b": 2}, x))
def test_block_stack_cleanup(self):
params = {
"a": 3,
"b": 4,
"c": 5,
}
dt = {
"c": 5,
}
def fn(x):
for name in params:
try:
x = x * dt[name]
except KeyError:
x = x * torch.sin(x)
return x
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
x = torch.randn(4)
self.assertEqual(fn(x), opt_fn(x))
def test_set_cause_with_arg(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(t, err):
err.__cause__ = ValueError()
return t.sin()
t = torch.randn(2)
e = TypeError("abcd")
fn(t, e)
self.assertIsInstance(e.__cause__, ValueError)
def test_set_cause_with_arg_error(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(t, err):
err.__cause__ = 2
return t.sin()
t = torch.randn(2)
e = TypeError("abcd")
with self.assertRaisesRegex(TypeError, "exception cause must be"):
fn(t, e)
@parametrize(
"ex",
[TypeError, CustomException],
name_fn=lambda x: x.__name__,
)
@make_dynamo_test
def test_set___cause__(self, ex):
def fn():
try:
raise ex
except ex:
raise TypeError from None
try:
fn()
except TypeError as e:
assert isinstance(e.__context__, ex)
assert e.__cause__ is None
assert e.__suppress_context__ is True
@parametrize(
"ex",
[RuntimeError, CustomException],
name_fn=lambda x: x.__name__,
)
@make_dynamo_test
def test_set___cause___error(self, ex):
def fn():
try:
raise ex
except Exception as e:
e.__cause__ = 2
raise
z = 0
try:
fn()
except TypeError as e:
z = 1
assert e.args == (
"exception cause must be None or derive from BaseException",
)
except Exception:
raise AssertionError from None
assert z == 1
def test_user_defined_exception_variable(self):
def fn(t):
z = 0
try:
raise CustomException
except ValueError:
z = 1
except CustomException as e:
# trying to call python_type on the
# UserDefinedExceptionClassVariable
cls = type(e)
if type(cls) is type:
t = t + 1
z = 2
assert z == 2
return t.sin()
t = torch.randn(2)
fn(t)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(t), opt_fn(t))
def test_user_defined_exception_with_args(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(t):
z = 0
try:
raise CustomExceptionWithArgs(2, b=3)
except ValueError:
z = 1
except CustomExceptionWithArgs:
z = 2
assert z == 2
t = torch.randn(2)
fn(t)
@make_dynamo_test
def test_raise_set___context__(self):
try:
raise TypeError
except TypeError as e:
exc = e
assert exc.__context__ is None
try:
raise ValueError
except ValueError as e:
exc2 = e
assert exc2.__context__ is None
def test_exception_kwargs(self):
@torch.compile(backend="eager", fullgraph=True)
def fn():
raise AttributeError(name="a")
self.assertRaises(Unsupported, fn)
def test_stack_trace_from_observed_exception(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(16, 16)
def forward(self, x):
# no attribute w on self.linear
weight = self.linear.w
return torch.nn.functional.linear(x, weight)
x = (torch.randn(4, 16, requires_grad=True),)
with self.assertRaisesRegex(Exception, "weight = self.linear.w"):
torch._dynamo.functional_export.dynamo_graph_capture_for_export(Model())(x)
instantiate_parametrized_tests(ExceptionTests)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
ExceptionTests
|
python
|
google__pytype
|
pytype/abstract/_singletons.py
|
{
"start": 7948,
"end": 9038
}
|
class ____(Singleton):
"""An empty value.
These values represent items extracted from empty containers. Because of false
positives in flagging containers as empty (consider:
x = []
def initialize():
populate(x)
def f():
iterate(x)
), we treat these values as placeholders that we can do anything with, similar
to Unsolvable, with the difference that they eventually convert to
NothingType so that cases in which they are truly empty are discarded (see:
x = ... # type: List[nothing] or Dict[int, str]
y = [i for i in x] # The type of i is int; y is List[int]
). On the other hand, if Empty is the sole type candidate, we assume that the
container was populated elsewhere:
x = []
def initialize():
populate(x)
def f():
return x[0] # Oops! The return type should be Any rather than nothing.
The nothing -> anything conversion happens in
convert.Converter._function_to_def and tracer_vm.CallTracer.pytd_for_types.
"""
def __init__(self, ctx: "context.Context") -> None:
super().__init__("empty", ctx)
|
Empty
|
python
|
py-pdf__pypdf
|
pypdf/constants.py
|
{
"start": 16762,
"end": 17486
}
|
class ____:
"""Table 58 – Entries in a Graphics State Parameter Dictionary"""
TYPE = "/Type" # name, optional
LW = "/LW" # number, optional
LC = "/LC" # integer, optional
LJ = "/LJ" # integer, optional
ML = "/ML" # number, optional
D = "/D" # array, optional
RI = "/RI" # name, optional
OP = "/OP"
op = "/op"
OPM = "/OPM"
FONT = "/Font" # array, optional
BG = "/BG"
BG2 = "/BG2"
UCR = "/UCR"
UCR2 = "/UCR2"
TR = "/TR"
TR2 = "/TR2"
HT = "/HT"
FL = "/FL"
SM = "/SM"
SA = "/SA"
BM = "/BM"
S_MASK = "/SMask" # dictionary or name, optional
CA = "/CA"
ca = "/ca"
AIS = "/AIS"
TK = "/TK"
|
GraphicsStateParameters
|
python
|
tornadoweb__tornado
|
tornado/test/web_test.py
|
{
"start": 16804,
"end": 17108
}
|
class ____(RequestHandler):
def initialize(self, test):
self.test = test
@gen.coroutine
def get(self):
self.test.on_handler_waiting()
yield self.test.cleanup_event.wait()
def on_connection_close(self):
self.test.on_connection_close()
|
ConnectionCloseHandler
|
python
|
huggingface__transformers
|
src/transformers/models/udop/modeling_udop.py
|
{
"start": 9480,
"end": 10828
}
|
class ____(nn.Module):
"""2D Image to Patch Embeddings"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.proj = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values):
batch_size, num_channels, height, width = pixel_values.shape
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
)
embeddings = self.proj(pixel_values)
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings
@auto_docstring
|
UdopPatchEmbeddings
|
python
|
numpy__numpy
|
numpy/_core/tests/test_overrides.py
|
{
"start": 16440,
"end": 18382
}
|
class ____:
def test_set_module(self):
assert_equal(np.sum.__module__, 'numpy')
assert_equal(np.char.equal.__module__, 'numpy.char')
assert_equal(np.fft.fft.__module__, 'numpy.fft')
assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
def test_inspect_sum(self):
signature = inspect.signature(np.sum)
assert_('axis' in signature.parameters)
def test_override_sum(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(np.sum)
def _(array):
return 'yes'
assert_equal(np.sum(MyArray()), 'yes')
def test_sum_on_mock_array(self):
# We need a proxy for mocks because __array_function__ is only looked
# up in the class dict
class ArrayProxy:
def __init__(self, value):
self.value = value
def __array_function__(self, *args, **kwargs):
return self.value.__array_function__(*args, **kwargs)
def __array__(self, *args, **kwargs):
return self.value.__array__(*args, **kwargs)
proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
proxy.value.__array_function__.return_value = 1
result = np.sum(proxy)
assert_equal(result, 1)
proxy.value.__array_function__.assert_called_once_with(
np.sum, (ArrayProxy,), (proxy,), {})
proxy.value.__array__.assert_not_called()
def test_sum_forwarding_implementation(self):
class MyArray(np.ndarray):
def sum(self, axis, out):
return 'summed'
def __array_function__(self, func, types, args, kwargs):
return super().__array_function__(func, types, args, kwargs)
# note: the internal implementation of np.sum() calls the .sum() method
array = np.array(1).view(MyArray)
assert_equal(np.sum(array), 'summed')
|
TestNumPyFunctions
|
python
|
bokeh__bokeh
|
src/bokeh/models/annotations/dimensional.py
|
{
"start": 1733,
"end": 2383
}
|
class ____(Model):
""" A base class for models defining units of measurement.
"""
# explicit __init__ to support Init signatures
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
ticks = Required(List(Float), help="""
Preferred values to choose from in non-exact mode.
""")
include = Nullable(List(String), default=None, help="""
An optional subset of preferred units from the basis.
""")
exclude = List(String, default=[], help="""
A subset of units from the basis to avoid.
""")
@abstractmethod
def is_known(self, unit: str) -> bool:
pass
|
Dimensional
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_kdtree.py
|
{
"start": 4516,
"end": 4671
}
|
class ____(_Test_random):
def setup_method(self):
super().setup_method()
self.x = np.random.randn(self.m)+10
@KDTreeTest
|
_Test_random_far
|
python
|
huggingface__transformers
|
tests/deepspeed/test_alst_ulysses_sp.py
|
{
"start": 1121,
"end": 7450
}
|
class ____(TestCasePlus):
"""Test Trainer with ALST/Ulysses sequence parallelism enabled via accelerate's ParallelismConfig."""
@require_torch_multi_accelerator
@require_accelerate
@slow
def test_sp_equivalence(self):
"""Test that ALST/Ulysses sequence parallelism produces the same losses as without it."""
# shared setup
world_size = 2
script_path = __file__ # self.test_file_dir} / "test_alst_ulysses_sp.py"
ds_config_path = self.test_file_dir / "ds_config_zero2.json"
# step 1. Run with SP enabled (sp_size=world_size)
sp_yes_output_dir = self.get_auto_remove_tmp_dir(return_pathlib_obj=True)
sp_yes_accelerate_config_path = sp_yes_output_dir / "context_parallel_config.yaml"
sp_yes_losses_path = sp_yes_output_dir / "sp_yes_losses.json"
write_file(
sp_yes_accelerate_config_path,
f"""
distributed_type: DEEPSPEED
deepspeed_config:
deepspeed_config_file: {ds_config_path}
machine_rank: 0
num_machines: 1
num_processes: {world_size}
parallelism_config:
parallelism_config_sp_size: {world_size}
parallelism_config_sp_backend: deepspeed
parallelism_config_sp_seq_length_is_variable: true
parallelism_config_sp_attn_implementation: sdpa
""",
)
cmd_sp = f"""
accelerate launch
--config_file {sp_yes_accelerate_config_path}
{script_path}
--output_dir {sp_yes_output_dir}
--report_to none
--max_steps 10
--per_device_train_batch_size 1
--gradient_accumulation_steps 1
--logging_steps 1
--remove_unused_columns False
--seed 42
--loss_output_file {sp_yes_losses_path}
""".split()
execute_subprocess_async(cmd_sp, env=self.get_env())
# step 2. Run without SP enabled (sp_size=world_size)
sp_no_output_dir = self.get_auto_remove_tmp_dir(return_pathlib_obj=True)
sp_no_accelerate_config_path = sp_no_output_dir / "context_parallel_config.yaml"
sp_no_losses_path = sp_no_output_dir / "sp_yes_losses.json"
write_file(
sp_no_accelerate_config_path,
f"""
distributed_type: DEEPSPEED
deepspeed_config:
deepspeed_config_file: {ds_config_path}
machine_rank: 0
num_machines: 1
num_processes: {world_size}
""",
)
cmd_sp = f"""
accelerate launch
--config_file {sp_no_accelerate_config_path}
{script_path}
--output_dir {sp_no_output_dir}
--report_to none
--max_steps 10
--per_device_train_batch_size 1
--gradient_accumulation_steps 1
--logging_steps 1
--remove_unused_columns False
--seed 42
--loss_output_file {sp_no_losses_path}
""".split()
execute_subprocess_async(cmd_sp, env=self.get_env())
# Compare losses - should be very close since SP just splits sequence computation
sp_yes_losses = read_json_file(sp_yes_losses_path)
sp_no_losses = read_json_file(sp_no_losses_path)
assert len(sp_yes_losses) == len(sp_no_losses), (
f"Different number of losses: SP has {len(sp_yes_losses)}, no-SP has {len(sp_no_losses)}"
)
# ALST/UlyssesSP should produce very similar results (small numerical differences expected)
# The differences come from:
# - Different gradient reduction patterns in distributed training
# - BF16 mixed precision accumulated differences
sp_yes_losses_tensor = torch.tensor(sp_yes_losses)
sp_no_losses_tensor = torch.tensor(sp_no_losses)
torch.testing.assert_close(
sp_yes_losses_tensor,
sp_no_losses_tensor,
atol=2e-2,
rtol=2e-5,
msg=f"SP-enabled losses {sp_yes_losses} do not match SP-disabled losses {sp_no_losses}",
)
if __name__ == "__main__":
model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM"
# Parse custom arguments (not TrainingArguments parameters)
loss_output_file = None
if "--loss_output_file" in sys.argv:
idx = sys.argv.index("--loss_output_file")
loss_output_file = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
model_name,
attn_implementation="sdpa", # SP requires SDPA or FA
)
# fix the outdated testing model config
model.generation_config.pad_token_id = 1
# Create simple dataset: just tokenize some text
texts = [
"The quick brown fox jumps over the lazy dog. " * 10,
"Hello world, this is a test sentence for training. " * 10,
] * 4 # 8 samples total
def tokenize_function(examples):
return tokenizer(examples, max_length=128, truncation=True, padding="max_length")
train_dataset = [tokenize_function(text) for text in texts]
# Use standard DataCollatorForLanguageModeling for causal LM
# pad_to_multiple_of=4 ensures sequences are divisible by sp_size * 2 (for sp_size=2)
# Trainer will automatically generate position_ids and shift_labels as needed
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=False, # Causal language modeling
pad_to_multiple_of=4,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
data_collator=data_collator,
)
# Train for a few steps
trainer.train()
# Verify training completed
assert trainer.state.global_step > 0, "Training should have completed at least one step"
# Save losses to file if requested (for equivalence testing)
if loss_output_file and training_args.process_index == 0:
losses = [log["loss"] for log in trainer.state.log_history if "loss" in log]
with open(loss_output_file, "w") as f:
json.dump(losses, f)
|
TestTrainerALSTUlyssesSP
|
python
|
numba__llvmlite
|
llvmlite/tests/test_binding.py
|
{
"start": 24691,
"end": 29817
}
|
class ____(BaseTest):
"""
Test miscellaneous functions in llvm.binding.
"""
def test_parse_assembly(self):
self.module(asm_sum)
def test_parse_assembly_error(self):
with self.assertRaises(RuntimeError) as cm:
self.module(asm_parse_error)
s = str(cm.exception)
self.assertIn("parsing error", s)
self.assertIn("invalid operand type", s)
def test_nonalphanum_block_name(self):
mod = ir.Module()
ft = ir.FunctionType(ir.IntType(32), [])
fn = ir.Function(mod, ft, "foo")
bd = ir.IRBuilder(fn.append_basic_block(name="<>!*''#"))
bd.ret(ir.Constant(ir.IntType(32), 12345))
asm = str(mod)
self.assertEqual(asm, asm_nonalphanum_blocklabel)
def test_global_context(self):
gcontext1 = llvm.context.get_global_context()
gcontext2 = llvm.context.get_global_context()
assert gcontext1 == gcontext2
def test_dylib_symbols(self):
llvm.add_symbol("__xyzzy", 1234)
llvm.add_symbol("__xyzzy", 5678)
addr = llvm.address_of_symbol("__xyzzy")
self.assertEqual(addr, 5678)
addr = llvm.address_of_symbol("__foobar")
self.assertIs(addr, None)
def test_get_default_triple(self):
triple = llvm.get_default_triple()
self.assertIsInstance(triple, str)
self.assertTrue(triple)
def test_get_process_triple(self):
# Sometimes we get synonyms for PPC
def normalize_ppc(arch):
if arch == 'powerpc64le':
return 'ppc64le'
else:
return arch
triple = llvm.get_process_triple()
default = llvm.get_default_triple()
self.assertIsInstance(triple, str)
self.assertTrue(triple)
default_arch = normalize_ppc(default.split('-')[0])
triple_arch = normalize_ppc(triple.split('-')[0])
# Arch must be equal
self.assertEqual(default_arch, triple_arch)
def test_get_host_cpu_features(self):
features = llvm.get_host_cpu_features()
# Check the content of `features`
self.assertIsInstance(features, dict)
self.assertIsInstance(features, llvm.FeatureMap)
for k, v in features.items():
self.assertIsInstance(k, str)
self.assertTrue(k) # single feature string cannot be empty
self.assertIsInstance(v, bool)
self.assertIsInstance(features.flatten(), str)
re_term = r"[+\-][a-zA-Z0-9\._-]+"
regex = r"^({0}|{0}(,{0})*)?$".format(re_term)
# quick check for our regex
self.assertIsNotNone(re.match(regex, ""))
self.assertIsNotNone(re.match(regex, "+aa"))
self.assertIsNotNone(re.match(regex, "+a,-bb"))
# check CpuFeature.flatten()
if len(features) == 0:
self.assertEqual(features.flatten(), "")
else:
self.assertIsNotNone(re.match(regex, features.flatten()))
def test_get_host_cpu_name(self):
cpu = llvm.get_host_cpu_name()
self.assertIsInstance(cpu, str)
self.assertTrue(cpu)
def test_initfini(self):
code = """if 1:
from llvmlite import binding as llvm
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
llvm.initialize_all_targets()
llvm.initialize_all_asmprinters()
llvm.shutdown()
"""
subprocess.check_call([sys.executable, "-c", code])
def test_deprecated_init(self):
regex = r"llvmlite.binding.initialize\(\) is deprecated"
with self.assertRaisesRegex(RuntimeError, expected_regex=regex):
llvm.initialize()
def test_set_option(self):
# We cannot set an option multiple times (LLVM would exit() the
# process), so run the code in a subprocess.
code = """if 1:
from llvmlite import binding as llvm
llvm.set_option("progname", "-debug-pass=Disabled")
"""
subprocess.check_call([sys.executable, "-c", code])
def test_version(self):
major, minor, patch = llvm.llvm_version_info
# one of these can be valid
valid = (20,)
self.assertIn(major, valid)
self.assertIn(patch, range(9))
def test_check_jit_execution(self):
llvm.check_jit_execution()
@unittest.skipIf(no_de_locale(), "Locale not available")
def test_print_double_locale(self):
m = self.module(asm_double_locale)
expect = str(m)
# Change the locale so that comma is used as decimal-point
# to trigger the LLVM bug (llvmlite issue #80)
locale.setlocale(locale.LC_ALL, 'de_DE')
# The LLVM bug is trigged by print the module with double constant
got = str(m)
# Changing the locale should not affect the LLVM IR
self.assertEqual(expect, got)
def test_no_accidental_warnings(self):
code = "from llvmlite import binding"
flags = "-Werror"
cmdargs = [sys.executable, flags, "-c", code]
subprocess.check_call(cmdargs)
|
TestMisc
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/load.py
|
{
"start": 7157,
"end": 50120
}
|
class ____(object):
"""Helper class to load an object-based SavedModel."""
def __init__(self, object_graph_proto, saved_model_proto, export_dir,
ckpt_options, save_options, filters):
meta_graph = saved_model_proto.meta_graphs[0]
self._asset_file_def = meta_graph.asset_file_def
self._operation_attributes = {
node.name: node.attr for node in meta_graph.graph_def.node}
self._proto = object_graph_proto
self._export_dir = export_dir
self._concrete_functions = (
function_deserialization.load_function_def_library(
library=meta_graph.graph_def.library,
saved_object_graph=self._proto,
wrapper_function=_WrapperFunction))
# Store a set of all concrete functions that have been set up with
# captures.
self._restored_concrete_functions = set()
self._checkpoint_options = ckpt_options
self._save_options = save_options
# Metagraph has a mapping from FunctionDef name to aliases
self._concrete_function_aliases = meta_graph.meta_info_def.function_aliases
self.function_aliases = {}
if self._save_options.experimental_load_function_aliases:
# Create a mapping from aliases to polymorphic restored functions or lists
# of concrete functions. This mapping can later be used with SaveOptions
# when re-saving the loaded object to a SavedModel. We start with a
# mapping from aliases to lists of concrete functions. Later in
# _recreate_function, on a entry by entry basis, we replace lists with
# polymorphic restored functions if the concrete function associated with
# a restored function is identical to a list of concrete functions in an
# entry.
concrete_func_list_by_alias = collections.defaultdict(list)
for concrete_func_name, alias in self._concrete_function_aliases.items():
if concrete_func_name not in self._concrete_functions:
logging.warn(
(
"ConcreteFunction `%s` is listed in function alias but it"
" is not found."
),
concrete_func_name,
)
continue
concrete_function = self._concrete_functions[concrete_func_name]
concrete_func_list_by_alias[alias].append(concrete_function)
self.function_aliases = dict(concrete_func_list_by_alias)
self._pretty_printer = checkpoint.ObjectGraphProtoPrettyPrinter(self._proto)
# Stores user-defined node_filters argument.
self._node_filters = filters
# Stores map of string paths to integers.
self._node_path_to_id = self._convert_node_paths_to_ints()
self._loaded_nodes = {}
if isinstance(filters, dict):
# If node_filters is a dict, then the values may contain already created
# trackable objects. In this case, create a dictionary mapping node IDs to
# the already created nodes. This dict will be updated in
# `_retrieve_all_filtered_nodes` with tracked children.
for node_path, node in filters.items():
if isinstance(node, tuple):
self._loaded_nodes[self._node_path_to_id[node_path]] = node
else:
self._loaded_nodes[self._node_path_to_id[node_path]] = (node, setattr)
# Get a list of all integer node ids to load, or None if all nodes should be
# loaded. This list includes ids of child nodes.
self._filtered_nodes = self._retrieve_all_filtered_nodes()
# Order all nodes or filtered nodes using the dependencies.
self._ordered_node_ids = self._generate_ordered_node_ids()
self._load_all()
if not save_options.experimental_skip_checkpoint:
self._restore_checkpoint()
for node in self._nodes:
if isinstance(node, resource.CapturableResource):
init_op = node._initialize() # pylint: disable=protected-access
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
def _convert_node_paths_to_ints(self):
"""Maps all string node paths in node_filters to the int node ids."""
if self._node_filters is None:
return None
path_to_int = {}
for node_id in self._node_filters:
int_node_id = None
if isinstance(node_id, str):
node_path = node_id.split(".")
if node_path[0] != "root":
raise ValueError(
"When passing string identifiers to node_filters, the first name"
f" must be root. Received {node_path[0]}.")
int_node_id = 0
for n, name in enumerate(node_path[1:]):
int_node_id = self._find_node_child(
int_node_id, name, ".".join(node_path[:n+2]))
path_to_int[node_id] = int_node_id
else:
raise TypeError("Elements in node_filters must be strings.")
return path_to_int
def _retrieve_all_filtered_nodes(self):
"""Traverses through the object graph to get the IDs of all nodes to load.
As a side-effect, if node_filters is a dictionary that contains already-
created objects, then the children tracked by those objects will be
added to node_filters.
Returns:
List of all nodes to load, or None if all nodes should be loaded.
"""
if self._node_filters is None:
return None # All nodes should be loaded.
all_filtered_nodes = set()
nodes_to_visit = list(self._node_filters)
while nodes_to_visit:
node_path = nodes_to_visit.pop(0)
node_id = self._node_path_to_id[node_path]
if node_id in all_filtered_nodes:
continue
all_filtered_nodes.add(node_id)
node, setter = self._loaded_nodes.get(node_id, (None, None))
if node is not None:
if not isinstance(node, base.Trackable):
raise TypeError(
"Error when processing dictionary values passed to nodes_to_load."
f"Object at {node_path} is expected to be a checkpointable (i.e. "
"'trackable') TensorFlow object (e.g. tf.Variable, tf.Module or "
"Keras layer).")
node._maybe_initialize_trackable() # pylint: disable=protected-access
for reference in self._proto.nodes[node_id].children:
child_object, _ = self._loaded_nodes.get(
reference.node_id, (None, None))
# See if node already tracks the child reference, in which case add the
# child to the loaded_nodes dict.
if child_object is None and node is not None:
child_object = node._lookup_dependency(reference.local_name) # pylint: disable=protected-access
if isinstance(child_object, data_structures.TrackableDataStructure):
# Make setattr a noop to avoid overwriting already existing data
# structures.
setter = lambda *args: None
self._loaded_nodes[reference.node_id] = (child_object, setter)
child_path = "{}.{}".format(node_path, reference.local_name)
self._node_path_to_id[child_path] = reference.node_id
nodes_to_visit.append(child_path)
if 0 in all_filtered_nodes:
return None
return all_filtered_nodes
def _find_node_child(self, node_id, child_name, path):
for reference in self._proto.nodes[node_id].children:
if reference.local_name == child_name:
return reference.node_id
raise ValueError(f"Unable to find node {path}.")
def _load_all(self):
"""Loads all nodes and functions from the SavedModel and their edges."""
self._load_nodes()
self._load_edges()
# Set up concrete functions that aren't part of the object graph
# (e.g. gradient functions)
self._setup_remaining_functions()
self._load_checkpoint_save_and_restore_functions()
def _load_checkpoint_save_and_restore_functions(self):
"""Restores the checkpoint-related save/restore functions to all nodes."""
temp_session = [None]
for node_id, proto in self._iter_all_nodes():
node = self.get(node_id)
if proto.saveable_objects.keys() == {
trackable_utils.SERIALIZE_TO_TENSORS_NAME}:
# Restore Trackable serialize- and restore-from-tensor functions.
assert len(proto.saveable_objects) == 1
saveable_object_proto = next(iter(proto.saveable_objects.values()))
save_fn_id = saveable_object_proto.save_function
restore_fn_id = saveable_object_proto.restore_function
node._serialize_to_tensors = self.get(save_fn_id) # pylint: disable=protected-access
node._restore_from_tensors = self.get(restore_fn_id) # pylint: disable=protected-access
else:
# Restore legacy SaveableObject functions.
saveable_fn_by_name = {}
for name, saveable_object_proto in proto.saveable_objects.items():
save_fn_id = saveable_object_proto.save_function
restore_fn_id = saveable_object_proto.restore_function
saveable_fn_by_name[name] = (self.get(save_fn_id),
self.get(restore_fn_id))
node._self_saveable_object_factories = ( # pylint: disable=protected-access
saveable_object_util.recreate_saveable_objects(saveable_fn_by_name,
temp_session))
def _load_edges(self):
"""Adds edges from objects to other objects and functions."""
for node_id, object_proto in self._iter_all_nodes():
self._add_object_graph_edges(object_proto, node_id)
# If root object isn't loaded, then create edges from the root for
# checkpoint compatibility.
if self._filtered_nodes is not None and 0 not in self._filtered_nodes:
root = self.get(0)
for node_path in self._node_filters:
loaded_node = self._nodes[self._node_path_to_id[node_path]]
path = node_path.split(".")
current_node = root
for name in path[1:-1]:
if not hasattr(current_node, name):
setattr(current_node, name, self._recreate_base_user_object()[0])
current_node = getattr(current_node, name)
if not hasattr(current_node, path[-1]):
setattr(current_node, path[-1], loaded_node)
def _add_object_graph_edges(self, proto, node_id):
"""Adds edges from an object to its children."""
obj = self._nodes[node_id]
setter = self._node_setters[node_id]
for reference in proto.children:
setter(obj, reference.local_name, self._nodes[reference.node_id])
# Note: if an object has an attribute `__call__` add a class method
# that allows `obj()` syntax to work. This is done per-instance to
# allow `callable` to be used to find out if an object is callable.
if reference.local_name == "__call__" and not callable(obj):
setattr(type(obj), "__call__", _call_attribute)
def _setup_remaining_functions(self):
concrete_function_names = sorted(self._proto.concrete_functions.keys())
for name in concrete_function_names:
if name in self._restored_concrete_functions:
continue
self._setup_function_captures(name, self._nodes)
def _setup_function_captures(self, concrete_function_name, nodes):
"""Setup captures and variables in a restored function."""
if concrete_function_name in self._restored_concrete_functions:
return
self._restored_concrete_functions.add(concrete_function_name)
concrete_function = self._concrete_functions[concrete_function_name]
proto = self._proto.concrete_functions[concrete_function_name]
inputs = [nodes[node_id] for node_id in proto.bound_inputs]
restore_captures.restore_captures(concrete_function, inputs)
def _initialize_loaded_nodes(self):
nodes = {}
node_setters = {}
for node_id, (node, setter) in self._loaded_nodes.items():
nodes[node_id] = node
node_setters[node_id] = setter
return nodes, node_setters
def _get_node_dependencies(self, proto):
"""Returns a dictionary of all dependencies of an object.
Args:
proto: A SavedObject proto.
Returns:
Dict mapping string dependency name *or* int node id to the node id.
The int node id key is used for mapping function captures.
"""
dependencies = {ref.local_name: ref.node_id for ref in proto.dependencies}
kind = proto.WhichOneof("kind")
if kind == "function":
concrete_functions = proto.function.concrete_functions
for fn_name in concrete_functions:
for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:
dependencies[bound_input] = bound_input
elif kind == "bare_concrete_function":
fn_name = proto.bare_concrete_function.concrete_function_name
for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:
dependencies[bound_input] = bound_input
elif kind == "resource":
# Make sure that the resource creator is listed as a dependency.
for child in proto.children:
if child.local_name == "_create_resource":
dependencies["_create_resource"] = child.node_id
return dependencies
def _generate_ordered_node_ids(self):
"""Orders the node ids so that dependencies appear first."""
if self._filtered_nodes is None:
unordered_ids = range(len(self._proto.nodes))
else:
unordered_ids = list(self._filtered_nodes)
# Maps node ids -> list of dependencies (ids of other nodes that must be
# loaded before it).
dependency_map = collections.defaultdict(list)
for node_id in unordered_ids:
deps = dependency_map[node_id]
if self._loaded_nodes.get(node_id) is not None:
# Deps are only used if the node has not been created.
continue
proto = self._proto.nodes[node_id]
for dep in set(self._get_node_dependencies(proto).values()):
deps.append(dep)
if self._filtered_nodes is not None and dep not in self._filtered_nodes:
raise ValueError(
"Unable to partially load SavedModel since the specified filter "
"does not include all required objects for loading (e.g. "
"variables used in functions or deserialization dependencies). "
"Please include this path in the filter: "
f"{self._pretty_printer.node_names[dep]}")
# Add optimizer slot variable to dependency map.
prev_slot = None
for slot_variable_proto in proto.slot_variables:
slot_variable_node_id = slot_variable_proto.slot_variable_node_id
# The optimizer and original variable must be created before the slot
# variable, since the slot variable is generated using the Optimizer's
# add_slot API.
slot_deps = dependency_map[slot_variable_node_id]
slot_deps.append(node_id)
slot_deps.append(slot_variable_proto.original_variable_node_id)
if prev_slot is not None:
# Add previous slot to deps so that the optimizer slot variables are
# added in order. The ordering is needed because the slot name and
# variable are both added to ordered lists, which are exposed to the
# user via `Optimizer.get_slot_names()` and `Optimizer.weights`.
# TODO(kathywu): Maybe enforce some sort of deterministic ordering in
# `order_by_dependency` to avoid doing this?
slot_deps.append(prev_slot)
prev_slot = slot_variable_node_id
try:
return list(trackable_utils.order_by_dependency(dependency_map))
except trackable_utils.CyclicDependencyError:
# This should not happen since there is already a validation for cycles
# when saving, but raise an error just in case.
raise ValueError("Encountered a cycle in the deserialization dependencies"
"in the SavedModel. This is extremely unexpected, please"
"file a bug and make sure you are not manually modifying"
" the SavedModel.")
def _iter_all_nodes(self):
for node_id in self._ordered_node_ids:
yield node_id, self._proto.nodes[node_id]
def _load_nodes(self):
"""Load all saved objects."""
# `nodes` maps from node ids to recreated objects
# `node_setters` maps from node ids to setter functions
# (same signature as setattr) for setting children.
nodes, node_setters = self._initialize_loaded_nodes()
# Figure out which objects are slot variables. These objects are created
# with Optimizer.add_slot rather than _recreate_variable.
# Maps slot node id -> optimizer node id, SlotVariableReference proto
slot_variable_node_ids = {}
for node_id, proto in self._iter_all_nodes():
for slot_variable_proto in proto.slot_variables:
slot_variable_node_id = slot_variable_proto.slot_variable_node_id
slot_variable_node_ids[slot_variable_node_id] = (node_id,
slot_variable_proto)
# Re-create everything.
for node_id, proto in self._iter_all_nodes():
if nodes.get(node_id) is not None:
continue
elif node_id in slot_variable_node_ids:
# Use the public Optimizer interface when creating slot variables.
optimizer_node_id, slot_variable_proto = slot_variable_node_ids[node_id]
optimizer_object = nodes[optimizer_node_id]
optimized_variable = nodes[
slot_variable_proto.original_variable_node_id]
slot_variable = optimizer_object.add_slot(
var=optimized_variable,
slot_name=slot_variable_proto.slot_name)
nodes[slot_variable_proto.slot_variable_node_id] = slot_variable
node_setters[slot_variable_proto.slot_variable_node_id] = setattr
else:
node, setter = self._recreate(proto, node_id, nodes)
nodes[node_id] = node
node_setters[node_id] = setter
# If root object is not loaded, add a dummy root object for checkpoint
# compatibility.
if 0 not in nodes:
nodes[0] = self._recreate_base_user_object()[0]
self._nodes = [nodes.get(node_id)
for node_id in range(len(self._proto.nodes))]
self._node_setters = node_setters
def _restore_checkpoint(self):
"""Load state from checkpoint into the deserialized objects."""
variables_path = path_helpers.get_variables_path(self._export_dir)
# TODO(b/205010730): Clean use of private methods of TrackableSaver.
# pylint: disable=protected-access
saver = checkpoint.TrackableSaver(graph_view.ObjectGraphView(self.get(0)))
with ops.device("CPU"):
saver._file_prefix_placeholder = constant_op.constant(variables_path)
if self._save_options.allow_partial_checkpoint:
load_status = saver.restore(variables_path,
self._checkpoint_options).expect_partial()
load_status.assert_nontrivial_match()
else:
load_status = saver.restore(variables_path, self._checkpoint_options)
load_status.assert_existing_objects_matched()
ckpt = load_status._checkpoint
if not context.executing_eagerly():
reader = py_checkpoint_reader.NewCheckpointReader(variables_path)
# When running in eager mode, the `restore` call above has already run and
# restored the state of trackables, and calling `position.restore_ops()`
# would re-run the restore. In graph mode, that will return a cached list
# of ops that must run to restore the object on that position. We have to
# wire them in the initializers of the objects so that they get
# initialized properly when using common practices (e.g. the ones used by
# ManagedSession) without further user action.
for object_id, obj in dict(ckpt.object_by_proto_id).items():
position = restore.CheckpointPosition(checkpoint=ckpt,
proto_id=object_id)
registered_saver = position.get_registered_saver_name()
if registered_saver:
raise NotImplementedError(
"Loading a SavedModel that uses registered checkpoint saver is "
f"not supported in graph mode. The loaded object {obj} uses the "
f"saver registered with the name {registered_saver}.")
restore_ops = position.restore_ops(reader)
if restore_ops:
if resource_variable_ops.is_resource_variable(obj):
if len(restore_ops) == 1:
obj._initializer_op = restore_ops[0]
else:
obj._initializer_op = control_flow_ops.group(*restore_ops)
elif (isinstance(obj, lookup_ops.LookupInterface) or
isinstance(obj, resource.CapturableResource)):
# We don't need to check for eager execution here, since this code
# path should only be taken if we are restoring in graph mode.
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, restore_ops)
else:
raise NotImplementedError(
f"Unable to restore state of object {obj} from the checkpoint.")
def adjust_debug_info_func_names(self, debug_info):
"""Rewrite func names in the debug info by using the concrete func names."""
output_debug_info = graph_debug_info_pb2.GraphDebugInfo()
output_debug_info.files[:] = debug_info.files
# TODO: b/292007261 - Read name_to_trace_id as well as traces
for key in debug_info.traces:
node, func = key.split("@")
new_func = ""
if func in self._concrete_functions:
new_func = self._concrete_functions[func].function_def.signature.name
output_debug_info.traces[node + "@" + new_func].CopyFrom(
debug_info.traces[key])
return output_debug_info
def get(self, node_id):
if isinstance(node_id, str):
node_id = self._node_path_to_id[node_id]
return self._nodes[node_id]
def _recreate(self, proto, node_id, nodes):
"""Creates a Python object from a SavedObject protocol buffer.
Args:
proto: a SavedObject proto
node_id: int, the index of this object in the SavedObjectGraph node list.
nodes: dict mapping int node_ids -> created objects.
Returns:
The recreated object, and the set-attribute function for reconnecting
the trackable children.
"""
registered_class = registration.get_registered_class(proto.registered_name)
if registered_class is None:
registered_class = _BUILT_IN_REGISTRATIONS.get(proto.WhichOneof("kind"))
dependencies = {}
for key, dep_node_id in self._get_node_dependencies(proto).items():
dependencies[key] = nodes[dep_node_id]
if registered_class:
obj = registered_class._deserialize_from_proto( # pylint: disable=protected-access
proto=proto.serialized_user_proto,
object_proto=proto,
dependencies=dependencies,
export_dir=self._export_dir,
asset_file_def=self._asset_file_def,
operation_attributes=self._operation_attributes)
if isinstance(obj, base.Trackable):
setter = type(obj)._add_trackable_child # pylint: disable=protected-access
else:
# Returned object may be non-Trackable (e.g. when restoring captures).
setter = setattr
return obj, setter
else:
return self._recreate_default(proto, node_id, dependencies)
def _recreate_default(self, proto, node_id, deps):
"""Creates a Python object from a SavedObject protocol buffer."""
factory = {
"user_object": (
lambda: self._recreate_user_object(proto.user_object, node_id)),
"function": lambda: self._recreate_function(proto.function, deps),
"bare_concrete_function": functools.partial(
self._recreate_bare_concrete_function,
proto=proto.bare_concrete_function, dependencies=deps),
"variable": lambda: self._recreate_variable(proto.variable),
"captured_tensor": functools.partial(
self._get_tensor_from_fn, proto.captured_tensor),
}
kind = proto.WhichOneof("kind")
if kind not in factory:
raise ValueError(f"Unknown SavedObject type: {kind}. Expected one of "
f"{list(factory.keys())}.")
return factory[kind]()
def _recreate_user_object(self, proto, node_id):
"""Instantiates a SavedUserObject."""
if proto.identifier == "optimizer":
# Make sure that the Keras optimizers module is imported. This is needed
# to be able to load the "optimizer" object (OptimizerV2), which has
# special logic around adding slot variables with `add_slot` in this file.
try:
import tf_keras # pylint: disable=g-import-not-at-top,unused-import
try:
import tf_keras.optimizers.legacy as _ # pylint: disable=g-import-not-at-top
except ImportError:
try:
import tf_keras.optimizers.optimizer_v2 as _ # pylint: disable=g-import-not-at-top
except ImportError as e:
raise ImportError(
"Error when importing Keras. Unable to load SavedModel that "
"contains an optimizer without the Keras module.") from e
except ImportError:
try:
import keras.optimizers.legacy as _ # pylint: disable=g-import-not-at-top
except ImportError:
try:
import keras.optimizers.optimizer_v2 as _ # pylint: disable=g-import-not-at-top
except ImportError as e:
raise ImportError(
"Error when importing Keras. Unable to load SavedModel that "
"contains an optimizer without the Keras module.") from e
looked_up = revived_types.deserialize(proto)
if looked_up is None:
return self._recreate_base_user_object(proto, node_id)
return looked_up
def _recreate_base_user_object(self, proto=None, node_id=None):
del proto, node_id
# Note: each user object has its own class. This allows making each one
# individually callable by adding a `__call__` method to the classes of
# the objects instances that have a `__call__` property.
class _UserObject(autotrackable.AutoTrackable):
pass
return _UserObject(), setattr
def _recreate_function(self, proto, dependencies):
fn = function_deserialization.recreate_function(
proto, self._concrete_functions)
for name in proto.concrete_functions:
self._setup_function_captures(name, dependencies)
# If the list of concrete functions associated with this polymorphic
# restored function is identical to a list of concrete functions found in
# the function alias mapping, we replace the latter with this restored
# function. Also see comments in the __init__ method.
if self._save_options.experimental_load_function_aliases:
if proto.concrete_functions and all(
name in self._concrete_function_aliases
for name in proto.concrete_functions
):
alias = self._concrete_function_aliases[
next(iter(proto.concrete_functions))
]
aliased = self.function_aliases.get(alias)
assert isinstance(aliased, list)
# Note that we cannot compare f.name below with proto.concrete_functions
# because the former is new name for the restored ConcreteFunction
# object while the latter is the old name in the original proto.
if set(f.name for f in aliased) == set(
f.name for f in fn._list_all_concrete_functions() # pylint: disable=protected-access
):
self.function_aliases[alias] = fn
else:
logging.warn(
(
"Not aliasing '%s' to polymorphic restored function because"
" of mismatched concrete functions: %s vs %s"
),
alias,
set(f.name for f in aliased),
set(f.name for f in fn._list_all_concrete_functions()), # pylint: disable=protected-access
)
return fn, setattr
def _recreate_bare_concrete_function(self, proto, dependencies):
fn = function_deserialization.setup_bare_concrete_function(
proto, self._concrete_functions)
self._setup_function_captures(proto.concrete_function_name, dependencies)
return fn, setattr
def _recreate_variable(self, proto):
name = proto.name if proto.name else None
if name is not None:
dbg_name = name
else:
dbg_name = "<variable loaded from saved model>"
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
proto.synchronization, proto.aggregation, proto.trainable,
name=dbg_name))
def uninitialized_variable_creator(next_creator, **kwargs):
"""A variable creator that creates uninitialized variables."""
del next_creator
return resource_variable_ops.UninitializedVariable(**kwargs)
# Create a variable_creator_scope that creates uninitialized variables with
# a lower priority such that a potential distributed variable_creator_scope
# can take precedence.
with ops.get_default_graph()._variable_creator_scope( # pylint: disable=protected-access
uninitialized_variable_creator,
priority=50):
saved_device = proto.device
load_with_device = (
self._save_options.experimental_variable_policy
._save_variable_devices() and config.get_soft_device_placement() and
saved_device)
if load_with_device:
with ops.device(saved_device):
return variables.Variable(
shape=proto.shape,
dtype=proto.dtype,
name=name,
trainable=trainable,
synchronization=synchronization,
aggregation=aggregation), setattr
else:
return variables.Variable(
shape=proto.shape,
dtype=proto.dtype,
name=name,
trainable=trainable,
synchronization=synchronization,
aggregation=aggregation), setattr
def _get_tensor_from_fn(self, proto):
outer_graph = self._concrete_functions[proto.concrete_function].graph
captured_tensor = outer_graph.get_tensor_by_name(proto.name)
return captured_tensor, setattr
def _call_attribute(instance, *args, **kwargs):
return instance.__call__(*args, **kwargs)
@tf_export("saved_model.load", v1=["saved_model.load_v2"])
def load(export_dir, tags=None, options=None):
"""Load a SavedModel from `export_dir`.
Signatures associated with the SavedModel are available as functions:
```python
imported = tf.saved_model.load(path)
f = imported.signatures["serving_default"]
print(f(x=tf.constant([[1.]])))
```
Objects exported with `tf.saved_model.save` additionally have trackable
objects and functions assigned to attributes:
```python
exported = tf.train.Checkpoint(v=tf.Variable(3.))
exported.f = tf.function(
lambda x: exported.v * x,
input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
tf.saved_model.save(exported, path)
imported = tf.saved_model.load(path)
assert 3. == imported.v.numpy()
assert 6. == imported.f(x=tf.constant(2.)).numpy()
```
_Loading Keras models_
Keras models are trackable, so they can be saved to SavedModel. The object
returned by `tf.saved_model.load` is not a Keras object (i.e. doesn't have
`.fit`, `.predict`, etc. methods). A few attributes and functions are still
available: `.variables`, `.trainable_variables` and `.__call__`.
```python
model = tf.keras.Model(...)
tf.saved_model.save(model, path)
imported = tf.saved_model.load(path)
outputs = imported(inputs)
```
Use `tf.keras.models.load_model` to restore the Keras model.
_Importing SavedModels from TensorFlow 1.x_
1.x SavedModels APIs have a flat graph instead of `tf.function` objects.
These SavedModels will be loaded with the following attributes:
* `.signatures`: A dictionary mapping signature names to functions.
* `.prune(feeds, fetches) `: A method which allows you to extract
functions for new subgraphs. This is equivalent to importing the SavedModel
and naming feeds and fetches in a Session from TensorFlow 1.x.
```python
imported = tf.saved_model.load(path_to_v1_saved_model)
pruned = imported.prune("x:0", "out:0")
pruned(tf.ones([]))
```
See `tf.compat.v1.wrap_function` for details.
* `.variables`: A list of imported variables.
* `.graph`: The whole imported graph.
* `.restore(save_path)`: A function that restores variables from a checkpoint
saved from `tf.compat.v1.Saver`.
_Consuming SavedModels asynchronously_
When consuming SavedModels asynchronously (the producer is a separate
process), the SavedModel directory will appear before all files have been
written, and `tf.saved_model.load` will fail if pointed at an incomplete
SavedModel. Rather than checking for the directory, check for
"saved_model_dir/saved_model.pb". This file is written atomically as the last
`tf.saved_model.save` file operation.
Args:
export_dir: The SavedModel directory to load from.
tags: A tag or sequence of tags identifying the MetaGraph to load. Optional
if the SavedModel contains a single MetaGraph, as for those exported from
`tf.saved_model.save`.
options: `tf.saved_model.LoadOptions` object that specifies options for
loading.
Returns:
A trackable object with a `signatures` attribute mapping from signature
keys to functions. If the SavedModel was exported by `tf.saved_model.save`,
it also points to trackable objects, functions, debug info which it has been
saved.
Raises:
ValueError: If `tags` don't match a MetaGraph in the SavedModel.
"""
if isinstance(export_dir, os.PathLike):
export_dir = os.fspath(export_dir)
result = load_partial(export_dir, None, tags, options)["root"]
return result
@tf_export("__internal__.saved_model.load_partial", v1=[])
def load_partial(export_dir, filters, tags=None, options=None):
"""Partially load a SavedModel (saved from V2).
Similar to `tf.saved_model.load`, but with an additional argument that
lets you specify which nodes to load.
`tf.saved_model.load_partial(export_dir, ["root"])` and
`tf.saved_model.load(export_dir)` are equivalent.
Note: This only works for SavedModels saved with TensorFlow V2 from
`tf.saved_model.save` or Keras.
In Tensorflow V2, SavedModel stores the **object graph** of the saved object.
The graph contains nodes (`tf.Module`, `tf.Variable`, `tf.function`, Keras
layers, etc.) and edges that are the name of the attributes connecting the
objects.
*Example 1*
```
model = tf.Module()
model.child_layer = tf.Module()
model.child_layer.v = tf.Variable(5.)
tf.saved_model.save(model, '/tmp/model')
loaded = tf.__internal__.saved_model.load_partial(
... '/tmp/model',
... ['root.child_layer', 'root.child_layer.v'])
loaded['root.child_layer'].v.numpy()
5.
loaded['root.child_layer'].v is loaded['root.child_layer.v']
True
*Example 2*
model = tf.Module()
model.child_layer = tf.Module()
model.child_layer.v = tf.Variable(5.)
>>>
tf.saved_model.save(model, '/tmp/model')
# Create a variable
new_variable = tf.Variable(0.)
loaded = tf.__internal__.saved_model.load_partial(
... '/tmp/model',
... {'root.child_layer': None, 'root.child_layer.v': new_variable})
loaded['root.child_layer'].v.numpy()
5.
new_variable.numpy()
5.
```
**Loading under different distribution strategies**
You can load different parts of the model under different distribution
strategies. Note that this is very experimental so use with care.
```
model = tf.Module()
model.layer_1 = tf.Module()
model.layer_1.v = tf.Variable(5.)
model.layer_2 = tf.Module()
model.layer_2.v = tf.Variable(7.)
tf.saved_model.save(model, '/tmp/model')
# Load with no strategy
loaded = tf.__internal__.saved_model.load_partial(
... '/tmp/model',
... ['root.layer_1'])
loaded['root.layer_1'].v
<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
... loaded2 = tf.__internal__.saved_model.load_partial(
... '/tmp/model',
... ['root.layer_2'])
loaded2['root.layer_2'].v
MirroredVariable:{
0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=7.0>
}
```
Args:
export_dir: The SavedModel directory to load from.
filters: A list or dictionary where each element or key is a string
path to nodes that should be loaded. Node paths consist of all the child
attribute names to reach that node in the form: `root.{attribute_name}`.
The loader will load all of the specified nodes and their recursive
descendants. When this option is defined, the loader will return a
dictionary mapping the node paths to the loaded objects.
tags: A tag or sequence of tags identifying the MetaGraph to load. Optional
if the SavedModel contains a single MetaGraph, as for those exported from
`tf.saved_model.save`.
options: `tf.saved_model.LoadOptions` object that specifies options for
loading.
Returns:
A dictionary mapping node paths from the filter to loaded objects.
"""
options = options or load_options.LoadOptions()
if tags is not None and not isinstance(tags, set):
# Supports e.g. tags=SERVING and tags=[SERVING]. Sets aren't considered
# sequences for nest.flatten, so we put those through as-is.
tags = nest.flatten(tags)
saved_model_proto, debug_info = (
loader_impl.parse_saved_model_with_debug_info(export_dir))
loader = None
if (len(saved_model_proto.meta_graphs) == 1 and
saved_model_proto.meta_graphs[0].HasField("object_graph_def")):
metrics.IncrementReadApi(_LOAD_V2_LABEL)
meta_graph_def = saved_model_proto.meta_graphs[0]
# tensor_content field contains raw bytes in litle endian format
# which causes problems when loaded on big-endian systems
# requiring byteswap
if sys.byteorder == "big":
saved_model_utils.swap_function_tensor_content(meta_graph_def, "little",
"big")
if (tags is not None
and set(tags) != set(meta_graph_def.meta_info_def.tags)):
raise ValueError(
f"Got an incompatible argument to `tags`: {tags}. The SavedModel at "
f"{export_dir} has one MetaGraph with tags "
f"{meta_graph_def.meta_info_def.tags}. You may omit the argument, "
"pass 'None', or pass matching tags.")
object_graph_proto = meta_graph_def.object_graph_def
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_io_device=options.experimental_io_device)
with ops.init_scope():
try:
loader = Loader(object_graph_proto, saved_model_proto, export_dir,
ckpt_options, options, filters)
except errors.NotFoundError as err:
raise FileNotFoundError(
str(err) + "\n You may be trying to load on a different device "
"from the computational device. Consider setting the "
"`experimental_io_device` option in `tf.saved_model.LoadOptions` "
"to the io_device such as '/job:localhost'.")
root = loader.get(0)
root.graph_debug_info = loader.adjust_debug_info_func_names(debug_info)
root.tensorflow_version = meta_graph_def.meta_info_def.tensorflow_version
root.tensorflow_git_version = (
meta_graph_def.meta_info_def.tensorflow_git_version)
metrics.IncrementRead(write_version="2")
if options.experimental_load_function_aliases:
if hasattr(root, "function_aliases"):
raise ValueError(
"Could not load with experimental_load_function_aliases option"
" because the top-level object already has an attributed with name"
" 'function_aliases'"
)
root.function_aliases = loader.function_aliases
else:
if filters:
raise ValueError("SavedModels saved from Tensorflow 1.x) cannot be "
"loaded with node filters.")
with ops.init_scope():
root = load_v1_in_v2.load(
export_dir, tags, options.experimental_skip_checkpoint
)
root.graph_debug_info = debug_info
# For privacy concerns, please see the note in
# tensorflow/cc/saved_model/metrics.h
metrics.SetReadPath(saved_model_path=str(export_dir))
# Read and log SavedModel checksum, if it is nonzero.
try:
fingerprint = fingerprinting.read_fingerprint(export_dir)
except FileNotFoundError:
metrics.SetFoundFingerprintOnLoad(found_status=metrics.kFingerprintNotFound)
logging.info(
"Fingerprint not found. Saved model loading will continue.")
singleprint = ""
except RuntimeError:
metrics.SetFoundFingerprintOnLoad(found_status=metrics.kFingerprintError)
logging.exception(
"Fingerprint was found, but there was an error when reading the proto. "
"Saved model loading will continue.")
singleprint = ""
else:
metrics.SetFoundFingerprintOnLoad(found_status=metrics.kFingerprintFound)
metrics.SetReadFingerprint(
fingerprint=fingerprinting_utils.to_proto(
fingerprint).SerializeToString())
singleprint = fingerprint.singleprint()
try:
metrics.SetReadPathAndSingleprint(path=export_dir, singleprint=singleprint)
except metrics.MetricException:
logging.info("path_and_singleprint metric could not be logged. "
"Saved model loading will continue.")
if filters and loader is not None:
return {node_id: loader.get(node_id) for node_id in filters}
else:
return {"root": root}
def is_tf2_saved_model(export_dir):
"""Identifies if an exported SavedModel is a TF2 SavedModel.
There are differences in SavedModel semantics between TF1 and TF2 that are
documented here:
https://www.tensorflow.org/guide/migrate/saved_model#savedmodel. This helper
util function serves to distinguish the TF1 vs TF2 semantics used when
exporting SavedModels.
Args:
export_dir: The SavedModel directory to load from.
Returns:
True if TF2 SavedModel semantics are used, False if TF1 SavedModel semantics
are used.
"""
# Try reading the fingerprint first before parsing the SavedModel proto
try:
fingerprint = fingerprinting.read_fingerprint(export_dir)
if fingerprint.saved_object_graph_hash != 0:
logging.info("SavedModel at %s is a TF2 SavedModel", export_dir)
return True
except Exception: # pylint: disable=broad-exception-caught
logging.info(
"Failed to read fingerprint from SavedModel. Parsing MetaGraph ..."
)
saved_model_proto = loader_impl.parse_saved_model(export_dir)
if len(
saved_model_proto.meta_graphs
) == 1 and saved_model_proto.meta_graphs[0].HasField("object_graph_def"):
logging.info("SavedModel at %s is a TF2 SavedModel", export_dir)
return True
logging.info("SavedModel at %s is a TF1 SavedModel", export_dir)
return False
|
Loader
|
python
|
getsentry__sentry
|
tests/sentry/incidents/endpoints/test_serializers.py
|
{
"start": 40678,
"end": 58006
}
|
class ____(TestAlertRuleSerializerBase):
def mock_conversations_info(self, channel):
return mock_slack_response(
"conversations_info",
body={"ok": True, "channel": channel},
req_args={"channel": channel},
)
def patch_msg_schedule_response(self, channel_id, result_name="channel"):
body = {
"ok": True,
result_name: channel_id,
"scheduled_message_id": "Q1298393284",
}
return mock_slack_response("chat_scheduleMessage", body)
@cached_property
def other_project(self):
return self.create_project()
@cached_property
def alert_rule(self):
return self.create_alert_rule(projects=[self.project, self.other_project])
@cached_property
def trigger(self):
return create_alert_rule_trigger(self.alert_rule, "hello", 100)
@cached_property
def valid_params(self):
return {
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.EMAIL
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[AlertRuleTriggerAction.TargetType.USER],
"target_identifier": self.user.id,
}
@cached_property
def access(self):
return from_user(self.user, self.organization)
@cached_property
def context(self):
return {
"organization": self.organization,
"access": self.access,
"user": self.user,
"alert_rule": self.alert_rule,
"trigger": self.trigger,
}
@cached_property
def sentry_app(self):
return self.create_sentry_app(
organization=self.organization,
published=True,
verify_install=False,
name="Super Awesome App",
schema={
"elements": [
self.create_alert_rule_action_schema(),
]
},
)
@cached_property
def sentry_app_installation(self):
return self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization, user=self.user
)
def run_fail_validation_test(self, params, errors):
base_params = self.valid_params.copy()
base_params.update(params)
serializer = AlertRuleTriggerActionSerializer(context=self.context, data=base_params)
assert not serializer.is_valid()
assert serializer.errors == errors
def test_simple(self) -> None:
serializer = AlertRuleTriggerActionSerializer(context=self.context, data=self.valid_params)
assert serializer.is_valid()
assert serializer.validated_data["type"] == AlertRuleTriggerAction.Type.EMAIL.value
assert (
serializer.validated_data["target_type"] == AlertRuleTriggerAction.TargetType.USER.value
)
assert serializer.validated_data["target_identifier"] == str(self.user.id)
def test_validation_no_params(self) -> None:
serializer = AlertRuleTriggerActionSerializer(context=self.context, data={})
assert not serializer.is_valid()
field_is_required = ["This field is required."]
assert serializer.errors == {
"type": field_is_required,
"targetType": field_is_required,
"targetIdentifier": field_is_required,
}
def test_type(self) -> None:
valid_slugs = AlertRuleTriggerAction.get_all_slugs()
invalid_values = [f"Invalid type, valid values are {valid_slugs!r}"]
self.run_fail_validation_test({"type": 50}, {"type": invalid_values})
def test_target_type(self) -> None:
invalid_values = [
"Invalid targetType, valid values are [%s]"
% ", ".join(STRING_TO_ACTION_TARGET_TYPE.keys())
]
self.run_fail_validation_test({"target_type": 50}, {"targetType": invalid_values})
def test_user_perms(self) -> None:
self.run_fail_validation_test(
{
"target_type": ACTION_TARGET_TYPE_TO_STRING[AlertRuleTriggerAction.TargetType.USER],
"target_identifier": "1234567",
},
{"nonFieldErrors": ["User does not belong to this organization"]},
)
other_user = self.create_user()
self.run_fail_validation_test(
{
"target_type": ACTION_TARGET_TYPE_TO_STRING[AlertRuleTriggerAction.TargetType.USER],
"target_identifier": str(other_user.id),
},
{"nonFieldErrors": ["User does not belong to this organization"]},
)
def test_invalid_priority(self) -> None:
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.MSTEAMS
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"priority": "P1",
},
{
"priority": [
ErrorDetail("Can only be set for Pagerduty or Opsgenie", code="invalid")
]
},
)
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.PAGERDUTY
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"priority": "P1",
},
{
"priority": [
ErrorDetail(
f"Allowed priorities for Pagerduty are {str(PAGERDUTY_CUSTOM_PRIORITIES)}",
code="invalid",
)
]
},
)
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.OPSGENIE
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"priority": "critical",
},
{
"priority": [
ErrorDetail(
f"Allowed priorities for Opsgenie are {str(OPSGENIE_CUSTOM_PRIORITIES)}",
code="invalid",
)
]
},
)
@patch(
"sentry.incidents.logic.get_target_identifier_display_for_integration",
return_value=AlertTarget("test", "test"),
)
def test_pagerduty_valid_priority(self, mock_get: MagicMock) -> None:
params = {
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.PAGERDUTY
).slug,
"targetType": ACTION_TARGET_TYPE_TO_STRING[AlertRuleTriggerAction.TargetType.SPECIFIC],
"targetIdentifier": "123",
"priority": "critical",
}
serializer = AlertRuleTriggerActionSerializer(data=params, context=self.context)
assert serializer.is_valid()
action = serializer.save()
assert action.sentry_app_config["priority"] == "critical"
@patch(
"sentry.incidents.logic.get_target_identifier_display_for_integration",
return_value=AlertTarget("test", "test"),
)
def test_opsgenie_valid_priority(self, mock_get: MagicMock) -> None:
params = {
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.OPSGENIE
).slug,
"targetType": ACTION_TARGET_TYPE_TO_STRING[AlertRuleTriggerAction.TargetType.SPECIFIC],
"targetIdentifier": "123",
"priority": "P1",
}
serializer = AlertRuleTriggerActionSerializer(data=params, context=self.context)
assert serializer.is_valid()
action = serializer.save()
assert action.sentry_app_config["priority"] == "P1"
def test_discord(self) -> None:
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.DISCORD
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"target_identifier": "123",
},
{"integration": ["Integration must be provided for discord"]},
)
def test_slack(self) -> None:
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SLACK
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[AlertRuleTriggerAction.TargetType.USER],
"target_identifier": "123",
},
{"targetType": ["Invalid target type for slack. Valid types are [specific]"]},
)
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SLACK
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"target_identifier": "123",
},
{"integration": ["Integration must be provided for slack"]},
)
base_params = self.valid_params.copy()
base_params.update(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SLACK
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"target_identifier": "123",
"integration": str(self.integration.id),
}
)
serializer = AlertRuleTriggerActionSerializer(context=self.context, data=base_params)
assert serializer.is_valid()
with pytest.raises(serializers.ValidationError):
serializer.save()
def test_valid_slack_channel_id_sdk(self) -> None:
"""
Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier.
"""
base_params = self.valid_params.copy()
base_params.update(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SLACK
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"target_identifier": "merp",
"integration": str(self.integration.id),
}
)
context = self.context.copy()
context.update({"input_channel_id": "CSVK0921"})
channel = {"name": "merp", "id": "CSVK0921"}
with self.mock_conversations_info(channel):
serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params)
assert serializer.is_valid()
serializer.save()
# # Make sure the action was created.
alert_rule_trigger_actions = list(
AlertRuleTriggerAction.objects.filter(integration_id=self.integration.id)
)
assert len(alert_rule_trigger_actions) == 1
def test_invalid_slack_channel_id_sdk(self) -> None:
"""
Test that an invalid Slack channel ID is detected and blocks the action from being saved.
"""
base_params = self.valid_params.copy()
base_params.update(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SLACK
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"target_identifier": "merp",
"integration": str(self.integration.id),
}
)
context = self.context.copy()
context.update({"input_channel_id": "M40W931"})
with patch(
"slack_sdk.web.client.WebClient.conversations_info",
side_effect=SlackApiError("", response={"ok": False, "error": "channel_not_found"}),
):
serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params)
assert not serializer.is_valid()
# # Make sure the action was not created.
alert_rule_trigger_actions = list(
AlertRuleTriggerAction.objects.filter(integration_id=self.integration.id)
)
assert len(alert_rule_trigger_actions) == 0
@responses.activate
def test_invalid_slack_channel_name(self) -> None:
"""
Test that an invalid Slack channel name is detected and blocks the action from being saved.
"""
base_params = self.valid_params.copy()
base_params.update(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SLACK
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"target_identifier": "123",
"integration": str(self.integration.id),
}
)
context = self.context.copy()
context.update({"input_channel_id": "CSVK0921"})
with self.mock_conversations_info({"name": "merp", "id": "CSVK0921"}):
serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params)
assert not serializer.is_valid()
# # Make sure the action was not created.
alert_rule_trigger_actions = list(
AlertRuleTriggerAction.objects.filter(integration_id=self.integration.id)
)
assert len(alert_rule_trigger_actions) == 0
def test_sentry_app_action_missing_params(self) -> None:
self.run_fail_validation_test(
{
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SENTRY_APP
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SENTRY_APP
],
"target_identifier": "123",
"sentry_app": self.sentry_app.id,
"sentry_app_config": {"tag": "asdfasdfads"},
},
{"sentryApp": ["Missing parameter: sentry_app_installation_uuid"]},
)
def test_create_and_update_sentry_app_action_success(self) -> None:
serializer = AlertRuleTriggerActionSerializer(
context=self.context,
data={
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SENTRY_APP
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SENTRY_APP
],
"target_identifier": "1",
"sentry_app": self.sentry_app.id,
"sentry_app_config": {"channel": "#general"},
"sentry_app_installation_uuid": self.sentry_app_installation.uuid,
},
)
assert serializer.is_valid()
# Create action
serializer.save()
# # Make sure the action was created.
alert_rule_trigger_actions = list(
AlertRuleTriggerAction.objects.filter(sentry_app_id=self.sentry_app.id)
)
assert len(alert_rule_trigger_actions) == 1
# Update action
serializer = AlertRuleTriggerActionSerializer(
context=self.context,
data={
"type": AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.SENTRY_APP
).slug,
"target_type": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SENTRY_APP
],
"target_identifier": "1",
"sentry_app": self.sentry_app.id,
"sentry_app_config": {"channel": "#announcements"},
"sentry_app_installation_uuid": self.sentry_app_installation.uuid,
},
instance=alert_rule_trigger_actions[0],
)
assert serializer.is_valid()
# Update action
serializer.save()
alert_rule_trigger_action = AlertRuleTriggerAction.objects.get(
sentry_app_id=self.sentry_app.id
)
# Make sure the changes got applied
assert alert_rule_trigger_action.sentry_app_config == {"channel": "#announcements"}
|
TestAlertRuleTriggerActionSerializer
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/_client.py
|
{
"start": 18861,
"end": 19815
}
|
class ____:
_client: Anthropic
def __init__(self, client: Anthropic) -> None:
self._client = client
@cached_property
def completions(self) -> completions.CompletionsWithRawResponse:
from .resources.completions import CompletionsWithRawResponse
return CompletionsWithRawResponse(self._client.completions)
@cached_property
def messages(self) -> messages.MessagesWithRawResponse:
from .resources.messages import MessagesWithRawResponse
return MessagesWithRawResponse(self._client.messages)
@cached_property
def models(self) -> models.ModelsWithRawResponse:
from .resources.models import ModelsWithRawResponse
return ModelsWithRawResponse(self._client.models)
@cached_property
def beta(self) -> beta.BetaWithRawResponse:
from .resources.beta import BetaWithRawResponse
return BetaWithRawResponse(self._client.beta)
|
AnthropicWithRawResponse
|
python
|
huggingface__transformers
|
src/transformers/cli/serve.py
|
{
"start": 11574,
"end": 78507
}
|
class ____:
# Defining a class to help with internal state but in practice it's just a method to call
# TODO: refactor into a proper module with helpers + 1 main method
def __init__(
self,
continuous_batching: Annotated[
bool, typer.Option(help="Whether to use continuous batching for chat completions.")
] = False,
device: Annotated[
str,
typer.Option(
help="Device to use for inference; will default to `auto` and place the model on an accelerator if available."
),
] = "auto",
dtype: Annotated[
str | None,
typer.Option(
help="Override the default `torch.dtype` and load the model under this dtype. If `'auto'` is passed, the dtype will be automatically derived from the model's weights."
),
] = "auto",
trust_remote_code: Annotated[
bool, typer.Option(help="Whether to trust remote code when loading a model.")
] = False,
attn_implementation: Annotated[
str | None,
typer.Option(
help="Which attention implementation to use; you can run --attn_implementation=flash_attention_2, in which case you must install this manually by running `pip install flash-attn --no-build-isolation`."
),
] = None,
quantization: Annotated[
Optional[str],
typer.Option(help="Which quantization method to use. choices: 'bnb-4bit', 'bnb-8bit'"),
] = None,
host: Annotated[str, typer.Option(help="Interface the server will listen to.")] = "localhost",
port: Annotated[int, typer.Option(help="Port the server will listen to.")] = 8000,
model_timeout: Annotated[
int, typer.Option(help="Time in seconds after which a model will be removed from memory.")
] = 300,
log_level: Annotated[
str, typer.Option(help="Logging level as a string. Example: 'info' or 'warning'.")
] = "info",
default_seed: Annotated[
int | None, typer.Option(help="The default seed for torch, should be an integer.")
] = None,
enable_cors: Annotated[
bool,
typer.Option(
help="Whether to enable CORS. Some apps that make requests from external domains (e.g. Cursor) require CORS to be enabled."
),
] = False,
input_validation: Annotated[bool, typer.Option(help="Whether to turn on strict input validation.")] = False,
force_model: Annotated[
str | None,
typer.Option(
help="Name of the model to be forced on all requests. This is useful for testing Apps that don't allow changing models in the request."
),
] = None,
non_blocking: Annotated[
bool, typer.Option(hidden=True, help="Whether to run the server in a separate thread.")
] = False,
) -> None:
if not serve_dependencies_available:
raise ImportError(
"Missing dependencies for the serving CLI. Please install with `pip install transformers[serving]`"
)
# Save input arguments
self.continuous_batching = continuous_batching
self.device = device
self.dtype = dtype
self.trust_remote_code = trust_remote_code
self.attn_implementation = attn_implementation
self.quantization = quantization
self.host = host
self.port = port
self.model_timeout = model_timeout
self.log_level = log_level
self.default_seed = default_seed
self.enable_cors = enable_cors
self.input_validation = input_validation
self.force_model = force_model
self.non_blocking = non_blocking
# Seed
if default_seed is not None:
set_torch_seed(default_seed)
# Set up logging
transformers_logger = logging.get_logger("transformers")
transformers_logger.setLevel(logging.log_levels[log_level.lower()])
cb_logger = logging.get_logger("transformers.generation.continuous_batching")
cb_logger.setLevel(logging.log_levels[log_level.lower()])
# Internal state:
# 1. Tracks models in memory, to prevent reloading the model unnecessarily
self.loaded_models: dict[str, TimedModel] = {}
self.running_continuous_batching_manager: ContinuousBatchingManager | None = None
# 2. preserves information about the last call and last KV cache, to determine whether we can reuse the KV
# cache and avoid re-running prefill
self.last_messages = None
self.last_kv_cache = None
self.last_model = None
if self.model_timeout is None:
self.model_timeout = -1 if self.force_model else 300
if self.force_model:
model_id_and_revision = self.process_model_name(self.force_model)
self.last_model = model_id_and_revision
self.load_model_and_processor(model_id_and_revision)
@asynccontextmanager
async def lifespan(app: FastAPI):
yield
for model in self.loaded_models.values():
model.delete_model()
if self.running_continuous_batching_manager is not None:
self.running_continuous_batching_manager.stop(block=True, timeout=5)
app = FastAPI(lifespan=lifespan)
# Some apps that make requests from external domains (e.g. Cursor) require CORS to be enabled. However, for
# security purposes, it's disabled by default
if self.enable_cors:
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
logger.warning_once(
"CORS allow origin is set to `*`. This is not recommended for production environments."
)
from fastapi import Request
@app.post("/v1/chat/completions")
def chat_completion(request: Request, body: dict):
self.validate_chat_completion_request(request=body)
if self.continuous_batching:
return self.continuous_batching_chat_completion(body, request.state.request_id)
else:
return self.generate_chat_completion(body)
@app.post("/v1/responses")
def responses(request: dict):
self.validate_response_request(request=request)
# Support non-streaming mode when `stream=false` is provided
stream = request.get("stream", True)
if not stream:
response_obj = self.generate_response_non_streaming(request)
return JSONResponse(response_obj)
output = self.generate_response(request)
return StreamingResponse(output, media_type="text/event-stream")
@app.post("/v1/audio/transcriptions")
async def audio_transcriptions(request: Request):
# Parses the multipart/form-data request into the request format used by other endpoints
async with request.form() as form:
parsed_request = TransformersTranscriptionCreateParams(
file=await form["file"].read(),
model=form["model"],
# TODO: add other fields
)
logger.debug(
f"Received file: {form['file'].filename}; MIME type: {form['file'].content_type}; "
f"size: {form['file'].size / 1024:.2f} KiB"
)
self.validate_transcription_request(request=parsed_request)
output = self.generate_transcription(parsed_request)
return StreamingResponse(output, media_type="text/event-stream")
@app.options("/v1/models")
@app.get("/v1/models")
def get_all_models():
return JSONResponse({"object": "list", "data": self.get_gen_models()})
@app.get("/health")
def healthcheck():
return JSONResponse({"status": "ok"})
@app.middleware("http")
async def get_or_set_request_id(request: Request, call_next):
request_id = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4())
request.state.request_id = request_id
response = await call_next(request)
response.headers[X_REQUEST_ID] = request_id
return response
config = uvicorn.Config(app, host=self.host, port=self.port, log_level=self.log_level)
self.server = uvicorn.Server(config)
if self.non_blocking:
self.start_server()
else:
self.server.run()
def start_server(self):
def _run():
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
# serve() is a coroutine; it exits when server.should_exit becomes True
self._loop.run_until_complete(self.server.serve())
self._thread = threading.Thread(target=_run, name="uvicorn-thread", daemon=False)
self._thread.start()
def kill_server(self):
if not self._thread:
raise ValueError("The server cannot be killed as it was not launched in a separate thread.")
if not self._thread.is_alive():
raise ValueError("The server is already killed.")
self.server.should_exit = True
if self._thread and self._thread.is_alive():
self._thread.join(timeout=2)
def _validate_request(
self,
request: dict,
schema: TypedDict,
validator: TypeAdapter,
unused_fields: set,
):
"""
Validates the request against the schema, and checks for unexpected keys.
Args:
request (`dict`):
The request to validate.
schema (`TypedDict`):
The schema of the request to validate. It is a `TypedDict` definition.
validator (`TypeAdapter`):
The validator to use to validate the request. Built from `schema`.
unused_fields (`set`):
Fields accepted by `schema`, but not used in `transformers serve`.
Raises:
HTTPException: If the request is invalid or contains unexpected or unused fields.
"""
logger.debug(f"Validating request: {request}")
# Validate unexpected keys -- Pydantic doesn't validate extra keys in the request.
input_keys = set(request.keys())
possible_keys = schema.__mutable_keys__
unexpected_keys = input_keys - possible_keys
if unexpected_keys:
logger.error(f"Unexpected keys in the request: {unexpected_keys}")
raise HTTPException(status_code=422, detail=f"Unexpected keys in the request: {unexpected_keys}")
if self.input_validation:
# Validate expected keys
try:
validator.validate_python(request)
except ValidationError as e:
logger.error(f"Validation error: {e.errors()}")
raise HTTPException(status_code=422, detail=e.errors())
# Validate unused fields
unused_fields_in_request = input_keys & unused_fields
if unused_fields_in_request:
logger.error(f"Unused fields in the request: {unused_fields_in_request}")
raise HTTPException(
status_code=422, detail=f"Unused fields in the request: {unused_fields_in_request}"
)
def validate_response_request(self, request: dict):
self._validate_request(
request=request,
schema=TransformersResponseCreateParamsStreaming,
validator=response_validator,
unused_fields=UNUSED_RESPONSE_FIELDS,
)
def validate_chat_completion_request(self, request: dict):
self._validate_request(
request=request,
schema=TransformersCompletionCreateParamsStreaming,
validator=completion_validator,
unused_fields=UNUSED_CHAT_COMPLETION_FIELDS,
)
def validate_transcription_request(self, request: dict):
self._validate_request(
request=request,
schema=TransformersTranscriptionCreateParams,
validator=transcription_validator,
unused_fields=UNUSED_TRANSCRIPTION_FIELDS,
)
    def build_chat_completion_chunk(
        self,
        request_id: str = "",
        content: int | str | None = None,
        model: str | None = None,
        role: str | None = None,
        finish_reason: str | None = None,
        tool_calls: list[ChoiceDeltaToolCall] | None = None,
        decode_stream: DecodeStream | None = None,
        tokenizer: Optional["PreTrainedTokenizerFast"] = None,
    ) -> ChatCompletionChunk:
        """
        Builds a chunk of a streaming OpenAI Chat Completion response.

        IMPORTANT: The serialized chunk won't contain empty fields (fields with `None`). Some downstream apps,
        like Cursor, assume that when the field exists, it has data.

        Args:
            request_id (`str`):
                The request ID.
            content (`int` or `str`, *optional*):
                Content of the response from the model. When `decode_stream` and `tokenizer` are both provided,
                this is a token id that is decoded into a text delta; otherwise it is placed in the chunk as-is.
            model (`str`, *optional*):
                The model that generated the content.
            role (`str`, *optional*):
                The role of the next content, until a new role is defined.
            finish_reason (`str`, *optional*):
                The reason the generation by the model has finished.
            tool_calls (`list[ChoiceDeltaToolCall]`, *optional*):
                Data about the tool calls, when they are triggered.
            decode_stream (`DecodeStream`, *optional*):
                Incremental detokenizer used, together with `tokenizer`, to turn a token id into text.
            tokenizer (`PreTrainedTokenizerFast`, *optional*):
                Tokenizer whose backing fast tokenizer is fed to `decode_stream.step`.

        Returns:
            `ChatCompletionChunk`: The built chunk, one streaming delta in OpenAI format.
        """
        # Incrementally decode a raw token id into a text delta when a decoder is available.
        if decode_stream is not None and content is not None and tokenizer is not None:
            content = decode_stream.step(tokenizer._tokenizer, content)
        chunk = ChatCompletionChunk(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[
                ChoiceChunk(
                    delta=ChoiceDelta(
                        content=content,
                        role=role,
                        tool_calls=tool_calls,
                    ),
                    index=0,
                    finish_reason=finish_reason,
                )
            ],
            system_fingerprint="",
            object="chat.completion.chunk",
        )
        return chunk
@staticmethod
def chunk_to_sse_element(chunk: ChatCompletionChunk | BaseModel) -> str:
"""
Builds an event of a streaming OpenAI Response model or a ChatCompletion chunk.
IMPORTANT: The serialized chunk won't contain empty fields (fields with `None`). Some downstream apps,
like Cursor, assume that when the field exists, it has data.
Args:
chunk (`BaseModel` or `ChatCompletionChunk`):
The response to build an event from. One of the multiple OpenAI Response output types
Returns:
`str`: The built chunk, a string containing a JSON string with the payload.
"""
return f"data: {chunk.model_dump_json(exclude_none=True)}\n\n"
@staticmethod
@lru_cache
def get_gen_models(cache_dir: str | None = None) -> list[dict[str, any]]:
"""
List LLMs and VLMs in the cache.
"""
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES,
)
generative_models = []
logger.warning("Scanning the cache directory for LLMs and VLMs.")
for repo in tqdm(scan_cache_dir(cache_dir).repos):
if repo.repo_type != "model":
continue
refs = repo.refs
for ref, revision_info in refs.items():
files = revision_info.files
config_path = next((f.file_path for f in files if f.file_name == "config.json"), None)
if not config_path:
continue
config = json.loads(config_path.open().read())
if not (isinstance(config, dict) and "architectures" in config):
continue
architectures = config["architectures"]
llms = MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values()
vlms = MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES.values()
if any(arch for arch in architectures if arch in [*llms, *vlms]):
author = repo.repo_id.split("/") if "/" in repo.repo_id else ""
repo_handle = repo.repo_id + (f"@{ref}" if ref != "main" else "")
generative_models.append(
{
"owned_by": author,
"id": repo_handle,
"object": "model",
"created": repo.last_modified,
}
)
return generative_models
    def continuous_batching_chat_completion(self, req: dict, request_id: str) -> StreamingResponse | JSONResponse:
        """
        Generates an OpenAI Chat Completion using continuous batching.

        Args:
            req (`dict`): The request to generate an OpenAI Chat Completion for.
            request_id (`str`): Identifier under which this request is tracked in the batching manager.

        Returns:
            `StreamingResponse | JSONResponse`: An SSE stream of chunks when `req["stream"]` is set,
            otherwise a single buffered chat completion as JSON.
        """
        model_id_and_revision = self.process_model_name(req["model"])
        must_discard_cache = model_id_and_revision != self.last_model
        self.last_model = model_id_and_revision

        # When switching models, terminate a continuous batching manager if it is running.
        if must_discard_cache:
            if self.running_continuous_batching_manager is not None:
                self.running_continuous_batching_manager.stop(block=True, timeout=2)
                self.running_continuous_batching_manager = None

        model, processor = self.load_model_and_processor(model_id_and_revision)
        # Multimodal processors wrap a tokenizer; plain tokenizers are used directly.
        tokenizer = processor.tokenizer if hasattr(processor, "tokenizer") else processor

        generation_config = create_generation_config_from_req(
            req,
            model_generation_config=model.generation_config,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
            use_cache=False,
            do_sample=False,
            scheduler="fifo",
        )

        # Lazily start a (single, shared) batching manager; it persists across requests.
        if self.running_continuous_batching_manager is None:
            self.running_continuous_batching_manager = model.init_continuous_batching(
                generation_config=generation_config
            )

            # TODO (Joao, Lysandre): the logits processors should be fixed in continuous batching and correctly applied in non-cb
            self.running_continuous_batching_manager.logit_processor = LogitsProcessorList()
            self.running_continuous_batching_manager.start()

        # TODO (Joao, Lysandre): this should also work with tool support
        # Tokenize the chat; keep only the 1-D `input_ids` sequence (batch dim stripped).
        inputs = processor.apply_chat_template(req["messages"], return_tensors="pt", add_generation_prompt=True).to(
            model.device
        )["input_ids"][0]

        def stream_chat_completion(request_id, decode_stream):
            # Generator yielding one ChatCompletionChunk per generated token.
            from ..generation.continuous_batching import RequestStatus

            try:
                # Emit the assistant role to start the stream. Other chunks won't have a role, as it is implicit
                # they come from the assistant.
                yield self.build_chat_completion_chunk(request_id, role="assistant", model=model_id_and_revision)

                n_tokens_generated = 0
                for result in self.running_continuous_batching_manager.request_id_iter(request_id):
                    n_tokens_generated += 1
                    if result.status == RequestStatus.FINISHED:
                        # Report "length" when generation was cut off by the token budget.
                        generated_all_tokens = n_tokens_generated >= generation_config.max_new_tokens
                        # NOTE(review): `result` is the batching-manager result object while
                        # `tokenizer.eos_token` is a string — this comparison looks like it can
                        # never be True; confirm whether the *last generated token* was intended.
                        final_token_is_eos = result == tokenizer.eos_token
                        reason = "length" if (generated_all_tokens and not final_token_is_eos) else "stop"
                        yield self.build_chat_completion_chunk(
                            request_id,
                            finish_reason=reason,
                            model=model_id_and_revision,
                        )
                        break
                    else:
                        # Stream the newest token id; `build_chat_completion_chunk` decodes it.
                        yield self.build_chat_completion_chunk(
                            request_id=request_id,
                            content=result.generated_tokens[-1],
                            model=model_id_and_revision,
                            decode_stream=decode_stream,
                            tokenizer=tokenizer,
                        )

            except Exception as e:
                logger.error(str(e))
                self.running_continuous_batching_manager.cancel_request(request_id)
                yield f'data: {{"error": "{str(e)}"}}'

        def buffer_chat_completion(_request_id):
            # Non-streaming path: poll the manager until the full result is available.
            result = None
            while self.running_continuous_batching_manager.is_running() and result is None:
                result = self.running_continuous_batching_manager.get_result(request_id=_request_id, timeout=1)

            content = tokenizer.decode(result.generated_tokens)
            chat_completion_result = ChatCompletion(
                id=_request_id,
                created=int(time.time()),
                object="chat.completion",
                model=model_id_and_revision,
                choices=[
                    Choice(
                        # TODO check the index
                        index=0,
                        message=ChatCompletionMessage(content=content, role="assistant"),
                        finish_reason="stop",
                    )
                ],
                # TODO implement function calling
                # TODO implement usage
            )
            return chat_completion_result

        async def cancellation_wrapper_stream(_request_id):
            # Enables cancellation in an async context
            try:
                decode_stream = DecodeStream(inputs.tolist(), False)
                for _chunk in stream_chat_completion(_request_id, decode_stream):
                    yield self.chunk_to_sse_element(_chunk)
                    # Yield control to the event loop so client disconnects are noticed.
                    await asyncio.sleep(0)
            except asyncio.CancelledError:
                self.running_continuous_batching_manager.cancel_request(_request_id)
                logger.warning(f"Request {_request_id} was cancelled.")

        def cancellation_wrapper_buffer(_request_id):
            # Enables cancellation in an async context
            try:
                return buffer_chat_completion(_request_id)
            except asyncio.CancelledError:
                self.running_continuous_batching_manager.cancel_request(_request_id)
                logger.warning(f"Request {_request_id} was cancelled.")

        # Enqueue the prompt; the manager may return a (new) canonical request id.
        request_id = self.running_continuous_batching_manager.add_request(
            inputs, request_id=request_id, max_new_tokens=generation_config.max_new_tokens, streaming=req.get("stream")
        )

        if req.get("stream"):
            return StreamingResponse(cancellation_wrapper_stream(request_id), media_type="text/event-stream")
        else:
            chunk = cancellation_wrapper_buffer(request_id)
            json_chunk = chunk.model_dump_json(exclude_none=True)
            return JSONResponse(json_chunk, media_type="application/json")
@staticmethod
def get_model_modality(model: "PreTrainedModel") -> Modality:
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES,
)
model_classname = model.__class__.__name__
if model_classname in MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES.values():
modality = Modality.VLM
elif model_classname in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
modality = Modality.LLM
else:
raise ValueError(f"Unknown modality: {model_classname}")
return modality
    @staticmethod
    def get_processor_inputs_from_inbound_messages(messages, modality: Modality):
        """
        Convert OpenAI-style chat `messages` into the content layout expected by the processor's chat template.

        For LLMs, each message's content is flattened into a single string (non-text parts are dropped).
        For VLMs, content is kept as a list of typed parts, and base64 image data URLs are materialized
        as temporary PNG files so the template can load them by path.

        Args:
            messages: Inbound messages; each has a `role` and a `content` that is either a string or a
                list of typed parts (`{"type": "text" | "image_url", ...}`).
            modality (`Modality`): Whether the target model is an LLM or a VLM.

        Returns:
            `list[dict]`: Messages reshaped for `processor.apply_chat_template`.
        """
        processor_inputs = []

        for message in messages:
            parsed_message = {"role": message["role"], "content": []}

            if modality == Modality.LLM:
                # Input: `content` is a string or a list of dictionaries with a "text" key.
                # Output: `content` is a string.
                if isinstance(message["content"], str):
                    parsed_content = message["content"]
                elif isinstance(message["content"], list):
                    parsed_content = []
                    for content in message["content"]:
                        if content["type"] == "text":
                            parsed_content.append(content["text"])
                    parsed_content = " ".join(parsed_content)
                # NOTE(review): if `content` is neither str nor list, `parsed_content` is unbound
                # here (NameError) — upstream request validation presumably prevents that; confirm.
                parsed_message["content"] = parsed_content

            elif modality == Modality.VLM:
                # Input: `content` is a string or a list of dictionaries with a "type" key (possible types: "text",
                # "image_url").
                # Output: `content` is a list of dictionaries with a "type" key
                if isinstance(message["content"], str):
                    parsed_message["content"].append({"type": "text", "text": message["content"]})
                else:
                    for content in message["content"]:
                        if content["type"] == "text":
                            parsed_message["content"].append(content)
                        elif content["type"] == "image_url":
                            # Base64 data URL: decode and write to a temp file, then reference it by path.
                            if "base64" in content["image_url"]["url"]:
                                image_data = re.sub("^data:image/.+;base64,", "", content["image_url"]["url"])
                                image = Image.open(BytesIO(base64.b64decode(image_data)))
                                # NOTE(review): `delete=False` means this temp file is never removed
                                # by anything visible here — potential disk leak; confirm cleanup.
                                file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
                                url = file.name
                                image.save(file.name)
                            else:
                                url = content["image_url"]["url"]
                            parsed_message["content"].append({"type": "image", "url": url})

            processor_inputs.append(parsed_message)
        return processor_inputs
    def generate_chat_completion(self, req: dict) -> StreamingResponse | JSONResponse:
        """
        Generates an OpenAI Chat Completion using `generate`.

        Args:
            req (`dict`): The request to generate an OpenAI Chat Completion for.

        Returns:
            `StreamingResponse | JSONResponse`: An SSE stream of chunks when `req["stream"]` is set,
            otherwise a single buffered chat completion as JSON.
        """
        # TODO: This should throw an error in case the specified model in the request is different to the forced model.
        if self.force_model is not None:
            req["model"] = self.force_model

        messages: Iterable[ChatCompletionMessageParam] = req["messages"]

        # HACK for tiny-agents: it sends a request after the assistant message (???). Let's assume we can't have a
        # request whose last message is from the assistant.
        # NOTE(review): this early exit returns None instead of a response object; the caller
        # presumably tolerates it — confirm.
        if messages[-1]["role"] == "assistant":
            return

        model_id_and_revision = self.process_model_name(req["model"])
        must_discard_cache = model_id_and_revision != self.last_model
        self.last_model = model_id_and_revision
        model, processor = self.load_model_and_processor(model_id_and_revision)
        modality = self.get_model_modality(model)
        processor_inputs = self.get_processor_inputs_from_inbound_messages(messages, modality)

        # ====== TOOL PREPROCESSING LOGIC ======
        # Detect whether the model's architecture belongs to a family with tool-call support.
        tool_model_family = None
        for supported_model_families in _MODELS_WITH_TOOL_SUPPORT:
            if supported_model_families in model.config.architectures[0].lower():
                tool_model_family = supported_model_families
                break
        # TODO: trigger 2 constrained generations after the tool call start token is emitted:
        # 1. force generation to pick from the tool names
        # 2. force generation to pick from that tool's arguments
        # ====== END OF TOOL PREPROCESSING LOGIC ======

        inputs = processor.apply_chat_template(
            processor_inputs,
            add_generation_prompt=True,
            tools=req.get("tools"),
            return_tensors="pt",
            return_dict=True,
            tokenize=True,
        )
        inputs = inputs.to(model.device)
        request_id = req.get("request_id", "req_0")

        # Temporary hack for GPTOSS 1: don't filter special tokens
        skip_special_tokens = True
        if "gptoss" in model.config.architectures[0].lower():
            skip_special_tokens = False

        generation_streamer = TextIteratorStreamer(
            processor,
            skip_special_tokens=skip_special_tokens,
            skip_prompt=True,
        )
        generation_config = create_generation_config_from_req(req, model_generation_config=model.generation_config)

        # Reuse the cached KV values only when this request extends the previous
        # conversation on the same model (i.e. the new prompt is strictly longer).
        last_kv_cache = None
        if self.is_continuation(req) and not must_discard_cache:
            seq_len = self.last_kv_cache.get_seq_length()
            if inputs["input_ids"].shape[-1] > seq_len:
                last_kv_cache = self.last_kv_cache

        generation_kwargs = {
            **inputs,
            "streamer": generation_streamer,
            "generation_config": generation_config,
            "return_dict_in_generate": True,
            "past_key_values": last_kv_cache,
        }

        def stream_chat_completion(streamer, _request_id):
            # Generator yielding ChatCompletionChunk objects while `generate` runs on a worker thread.
            # Temporary hack for GPTOS 2: filter out the CoT tokens. Full solution here implies defining new output
            # classes and piping the reasoning trace into a new field
            filter_cot = False
            cot_trace_end = None
            if "gptoss" in model.config.architectures[0].lower():
                filter_cot = True
                cot_trace_end = "<|channel|>final<|message|>"

            # Thin wrapper to save the KV cache after generation
            def generate_with_cache(**kwargs):
                generate_output = model.generate(**kwargs)
                self.last_kv_cache = generate_output.past_key_values

            thread = Thread(target=generate_with_cache, kwargs=generation_kwargs)
            results = ""

            try:
                thread.start()
                tool_state = ToolState()

                # Emit the assistant role to start the stream. Other chunks won't have a role, as it is implicit
                # they come from the assistant.
                yield self.build_chat_completion_chunk(request_id, role="assistant", model=model_id_and_revision)
                result = ""
                n_tokens_generated = 0
                for result in streamer:
                    n_tokens_generated += 1
                    # Temporary hack for GPT-OSS 3: don't emit the final "<|return|>"
                    if "gptoss" in model.config.architectures[0].lower():
                        result = result.removesuffix("<|return|>")
                    results += result

                    # (related to temporary hack 2)
                    if filter_cot:
                        if cot_trace_end in results:  # end of reasoning trace observed -> stop filtering
                            filter_cot = False
                            continue
                        else:
                            continue

                    # ====== TOOL CALL LOGIC ======
                    if tool_model_family is not None:
                        # Start of a tool call: reset state variables, set `inside_tool_call`
                        if result.strip() == _TOOL_CALL_TOKENS[tool_model_family]["start"]:
                            tool_state.inside_tool_call = True
                            continue

                        # End of tool call: reset `inside_tool_call`, emit a `finish_reason`
                        if result.strip() == _TOOL_CALL_TOKENS[tool_model_family]["end"]:
                            tool_state.reset()
                            yield self.build_chat_completion_chunk(
                                request_id=_request_id,
                                role=None,
                                finish_reason="tool_calls",
                                model=model_id_and_revision,
                            )
                            continue

                        # Inside a tool call
                        if tool_state.inside_tool_call:
                            tool_state.buffer += result

                            # First step: extract the tool name (may need several tokens, and we can't emit a delta
                            # until we have the full name)
                            if not tool_state.has_tool_name_defined:
                                tool_name = re.search(r"\"name\": \"(.*?)\"", tool_state.buffer)
                                if tool_name is None:
                                    continue
                                else:
                                    tool_name = tool_name.group(1)
                                tool_state.has_tool_name_defined = True
                                tool = ChoiceDeltaToolCall(
                                    function=ChoiceDeltaToolCallFunction(name=tool_name),
                                    index=0,
                                    type="function",
                                    id=_request_id + "_tool_call",  # Only the first tool call delta has an id
                                )

                            # Second step: extract tool arguments. The tool arguments can be seen as a json string
                            # within the tool json string. We emit a delta for the arguments.
                            else:
                                # Empty text: skip
                                if result == "":
                                    continue
                                # Until we see the `"arguments": {` in the buffer, we skip
                                # TODO: other models will likely need more elaborate processing here
                                if '"arguments": {' not in tool_state.buffer:
                                    continue

                                # Handle nesting. We want to exclude the last } from the emitted arguments (it's
                                # closing the outermost nesting level, outside the arguments block)
                                tool_state.arg_nesting_level += result.count("{")
                                tool_state.arg_nesting_level -= result.count("}")
                                if tool_state.arg_nesting_level < 0:
                                    result = "".join(result.split("}")[:-2]) + "}"  # e.g. "4}}\n" -> "4}"

                                tool = ChoiceDeltaToolCall(
                                    function=ChoiceDeltaToolCallFunction(arguments=result),
                                    index=0,
                                    type="function",
                                )

                            yield self.build_chat_completion_chunk(
                                request_id=_request_id,
                                role=None,
                                tool_calls=[tool],
                                model=model_id_and_revision,
                            )
                            continue
                    # ====== END OF TOOL CALL LOGIC ======

                    # All non-tool related tokens are emitted as assistant messages. Empty text is skipped.
                    if result != "":
                        yield self.build_chat_completion_chunk(
                            _request_id, content=result, model=model_id_and_revision
                        )

                # Report "length" when the token budget was exhausted without hitting EOS.
                # NOTE(review): with `skip_special_tokens=True` the decoded text should never equal
                # `eos_token`, so `final_token_is_eos` looks always-False in that mode — confirm.
                generated_all_tokens = n_tokens_generated >= generation_config.max_new_tokens
                final_token_is_eos = result == streamer.tokenizer.eos_token
                reason = "length" if (generated_all_tokens and not final_token_is_eos) else "stop"
                yield self.build_chat_completion_chunk(_request_id, finish_reason=reason, model=model_id_and_revision)
                thread.join()
            except Exception as e:
                logger.error(str(e))
                yield f'data: {{"error": "{str(e)}"}}'
            finally:
                thread.join()

        if req.get("stream"):
            return StreamingResponse(
                map(self.chunk_to_sse_element, stream_chat_completion(generation_streamer, request_id)),
                media_type="text/event-stream",
            )
        else:
            # Non-streaming mode: drain the generator and fold the deltas into one completion.
            content = []
            finish_reason = "stop"
            generator = stream_chat_completion(generation_streamer, request_id)
            usage = None
            for chunk in generator:
                choice = chunk.choices[0]
                if getattr(choice.delta, "content", None):
                    content.append(choice.delta.content)
                if choice.finish_reason:
                    finish_reason = choice.finish_reason
                if getattr(chunk, "usage", None):
                    usage = chunk.usage
            chat_completion_result = ChatCompletion(
                id=request_id,
                created=int(time.time()),
                object="chat.completion",
                model=model_id_and_revision,
                choices=[
                    Choice(
                        # TODO check the index
                        index=0,
                        message=ChatCompletionMessage(content="".join(content), role="assistant"),
                        finish_reason=finish_reason,
                    )
                ],
                # TODO implement function calling
                usage=usage,
            )
            result = chat_completion_result.model_dump(exclude_none=True)
            return JSONResponse(result, media_type="application/json")
def generate_response(self, req: dict) -> Generator[str, None, None]:
"""
Generates an OpenAI Response using `generate`.
Args:
req (`dict`): The request to generate an OpenAI Response for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Response events.
"""
# TODO -- Implement non-streaming mode
model_id_and_revision = self.process_model_name(req["model"])
must_discard_cache = model_id_and_revision != self.last_model
self.last_model = model_id_and_revision
model, processor = self.load_model_and_processor(model_id_and_revision)
if isinstance(req["input"], str):
inputs = [{"role": "system", "content": req["instructions"]}] if "instructions" in req else []
inputs.append({"role": "user", "content": req["input"]})
elif isinstance(req["input"], list):
if "instructions" in req:
if req["input"][0]["role"] != "system":
inputs = [{"role": "system", "content": req["instructions"]}, *req["input"]]
else:
inputs = req["input"]
inputs[0]["content"] = req["instructions"]
else:
inputs = req["input"]
elif isinstance(req["input"], dict):
inputs = [{"role": "system", "content": req["instructions"]}] if "instructions" in req else []
inputs.append(req["input"])
else:
raise TypeError("inputs should be a list, dict, or str")
inputs = processor.apply_chat_template(inputs, add_generation_prompt=True, return_tensors="pt")["input_ids"]
inputs = inputs.to(model.device)
request_id = req.get("previous_response_id", "req_0")
# Temporary hack for GPT-OSS 1: don't filter special tokens
skip_special_tokens = True
if "gptoss" in model.config.architectures[0].lower():
skip_special_tokens = False
generation_streamer = TextIteratorStreamer(
processor,
skip_special_tokens=skip_special_tokens,
skip_prompt=True,
)
generation_config = create_generation_config_from_req(req, model_generation_config=model.generation_config)
last_kv_cache = None
if self.is_continuation(req) and not must_discard_cache:
seq_len = self.last_kv_cache.get_seq_length()
if inputs["input_ids"].shape[-1] > seq_len:
last_kv_cache = self.last_kv_cache
generation_kwargs = {
"inputs": inputs,
"attention_mask": torch_ones_like(inputs),
"streamer": generation_streamer,
"generation_config": generation_config,
"return_dict_in_generate": True,
"past_key_values": last_kv_cache,
}
def stream_response(streamer, _request_id):
# Temporary hack for GPT-OSS 2: filter out the CoT tokens. Full solution here implies defining new output
# classes and piping the reasoning trace into a new field
filter_cot = False
cot_trace_end = None
if "gptoss" in model.config.architectures[0].lower():
filter_cot = True
cot_trace_end = "<|channel|>final<|message|>"
# Thin wrapper to save the KV cache after generation
def generate_with_cache(**kwargs):
generate_output = model.generate(**kwargs)
self.last_kv_cache = generate_output.past_key_values
thread = Thread(target=generate_with_cache, kwargs=generation_kwargs)
sequence_number = 0
output_index = 0
content_index = 0
try:
thread.start()
created_at = time.time() # the spec expects a unix timestamp in seconds
# We start by acknowledging the request (the request has `status="queued"`), and then by moving it to
# in progress (`status="in_progress"`)
response_created = ResponseCreatedEvent(
type="response.created",
sequence_number=sequence_number,
response=Response(
id=f"resp_{request_id}",
created_at=created_at,
status="queued",
model=model_id_and_revision,
instructions=req.get("instructions"),
text={"format": {"type": "text"}},
object="response",
tools=[],
output=[],
parallel_tool_calls=req.get("parallel_tool_calls", False),
tool_choice="auto",
metadata=req.get("metadata"),
),
)
sequence_number += 1
yield self.chunk_to_sse_element(response_created)
response_in_progress = ResponseInProgressEvent(
type="response.in_progress",
sequence_number=sequence_number,
response=Response(
id=f"resp_{request_id}",
created_at=created_at,
status="in_progress",
model=model_id_and_revision,
instructions=req.get("instructions"),
text={"format": {"type": "text"}},
object="response",
tools=[],
output=[],
parallel_tool_calls=req.get("parallel_tool_calls", False),
tool_choice="auto",
metadata=req.get("metadata"),
),
)
sequence_number += 1
yield self.chunk_to_sse_element(response_in_progress)
# Start the output item. Emit the assistant role to start the stream. Other chunks won't have a role,
# as it is implicit
response_output_item_added = ResponseOutputItemAddedEvent(
type="response.output_item.added",
sequence_number=sequence_number,
output_index=output_index,
item=ResponseOutputMessage(
id=f"msg_{request_id}", type="message", status="in_progress", role="assistant", content=[]
),
)
sequence_number += 1
yield self.chunk_to_sse_element(response_output_item_added)
# Start the content part of the event
response_content_part_added = ResponseContentPartAddedEvent(
type="response.content_part.added",
item_id=f"msg_{request_id}",
sequence_number=sequence_number,
output_index=output_index,
content_index=content_index,
part=ResponseOutputText(type="output_text", text="", annotations=[]),
)
sequence_number += 1
yield self.chunk_to_sse_element(response_content_part_added)
# Stream the actual generated text
results = ""
for result in streamer:
# Temporary hack for GPTOS 3: don't emit the final "<|return|>"
if "gptoss" in model.config.architectures[0].lower():
result = result.removesuffix("<|return|>")
results += result
# (related to temporary hack 2)
if filter_cot:
if cot_trace_end in results: # end of reasoning trace observed -> stop filtering
filter_cot = False
results = "" # reset the results -> results will now track the final response
continue
else:
response_output_text_delta = ResponseTextDeltaEvent(
type="response.output_text.delta",
item_id=f"msg_{request_id}",
sequence_number=sequence_number,
output_index=output_index,
content_index=content_index,
delta=result,
logprobs=[],
)
sequence_number += 1
yield self.chunk_to_sse_element(response_output_text_delta)
else:
# Normal path: emit token deltas when not filtering CoT
if result:
response_output_text_delta = ResponseTextDeltaEvent(
type="response.output_text.delta",
item_id=f"msg_{request_id}",
sequence_number=sequence_number,
output_index=output_index,
content_index=content_index,
delta=result,
logprobs=[],
)
sequence_number += 1
yield self.chunk_to_sse_element(response_output_text_delta)
# Signal the end of the text generation
response_output_text_done = ResponseTextDoneEvent(
type="response.output_text.done",
item_id=f"msg_{request_id}",
sequence_number=sequence_number,
output_index=output_index,
content_index=0,
text=results,
logprobs=[],
)
sequence_number += 1
yield self.chunk_to_sse_element(response_output_text_done)
# Complete the content part
response_content_part_done = ResponseContentPartDoneEvent(
type="response.content_part.done",
item_id=f"msg_{request_id}",
sequence_number=sequence_number,
output_index=output_index,
content_index=content_index,
part=ResponseOutputText(type="output_text", text=response_output_text_done.text, annotations=[]),
)
sequence_number += 1
content_index += 1
yield self.chunk_to_sse_element(response_content_part_done)
# Complete the output item
response_output_item_done = ResponseOutputItemDoneEvent(
type="response.output_item.done",
sequence_number=sequence_number,
output_index=output_index,
item=ResponseOutputMessage(
id=f"msg_{request_id}",
type="message",
status="completed",
role="assistant",
content=[response_content_part_done.part],
annotations=[],
),
)
sequence_number += 1
output_index += 1
yield self.chunk_to_sse_element(response_output_item_done)
# Finally, Complete the event
response_completed = ResponseCompletedEvent(
type="response.completed",
sequence_number=sequence_number,
response=Response(
id=f"resp_{request_id}",
created_at=created_at,
status="completed",
model=model_id_and_revision,
instructions=req.get("instructions"),
text={"format": {"type": "text"}},
output=[response_output_item_done.item],
object="response",
tools=[],
parallel_tool_calls=req.get("parallel_tool_calls", False),
tool_choice="auto",
metadata=req.get("metadata"),
),
)
sequence_number += 1
yield self.chunk_to_sse_element(response_completed)
thread.join()
except Exception as e:
logger.error(f"Exception in response generation: {str(e)}")
error_event = ResponseErrorEvent(
type="error",
sequence_number=sequence_number,
message=str(e),
)
sequence_number += 1
yield self.chunk_to_sse_element(error_event)
response_failed = ResponseFailedEvent(
type="response.failed",
sequence_number=sequence_number,
response=Response(
id=f"resp_{request_id}",
created_at=created_at,
status="failed",
model=model_id_and_revision,
instructions=req.get("instructions"),
text={"format": {"type": "text"}},
output=[],
object="response",
tools=[],
parallel_tool_calls=False,
tool_choice="auto",
metadata=req.get("metadata"),
error=ResponseError(
code="server_error",
message=str(e),
),
),
)
sequence_number += 1
yield self.chunk_to_sse_element(response_failed)
finally:
thread.join()
return stream_response(generation_streamer, request_id)
    def generate_response_non_streaming(self, req: dict) -> dict:
        """
        Generates an OpenAI Response in non-streaming mode (single JSON payload).

        Args:
            req (`dict`): The request to generate an OpenAI Response for. `input` may be a string, a
                message dict, or a list of message dicts; `instructions` (if present) becomes the
                system message.

        Returns:
            `dict`: The OpenAI `Response` serialized as a dict.

        Raises:
            ValueError: If `req["input"]` is not a string, dict, or list.
        """
        model_id_and_revision = self.process_model_name(req["model"])
        must_discard_cache = model_id_and_revision != self.last_model
        self.last_model = model_id_and_revision
        model, processor = self.load_model_and_processor(model_id_and_revision)

        # Normalize `input` (+ optional `instructions`) into a list of chat messages.
        if isinstance(req["input"], str):
            inputs = [{"role": "system", "content": req["instructions"]}] if "instructions" in req else []
            inputs.append({"role": "user", "content": req["input"]})
        elif isinstance(req["input"], list):
            if "instructions" in req:
                if req["input"][0]["role"] != "system":
                    inputs = [{"role": "system", "content": req["instructions"]}, *req["input"]]
                else:
                    # An explicit system message is overridden by `instructions` (mutates the request in place).
                    inputs = req["input"]
                    inputs[0]["content"] = req["instructions"]
            else:
                inputs = req["input"]
        elif isinstance(req["input"], dict):
            inputs = [{"role": "system", "content": req["instructions"]}] if "instructions" in req else []
            inputs.append(req["input"])
        else:
            # NOTE(review): the streaming sibling `generate_response` raises TypeError for the same
            # condition — consider aligning the exception types.
            raise ValueError("inputs should be a list, dict, or str")

        inputs = processor.apply_chat_template(inputs, add_generation_prompt=True, return_tensors="pt")["input_ids"]
        inputs = inputs.to(model.device)
        request_id = req.get("previous_response_id", "req_0")

        # Temporary hack for GPTOSS 1: don't filter special tokens
        skip_special_tokens = True
        if "gptoss" in model.config.architectures[0].lower():
            skip_special_tokens = False

        generation_config = create_generation_config_from_req(req, model_generation_config=model.generation_config)

        # Reuse cached KV values only when this request extends the previous conversation on the same model.
        last_kv_cache = None
        if self.is_continuation(req) and not must_discard_cache:
            seq_len = self.last_kv_cache.get_seq_length()
            if inputs.shape[-1] > seq_len:
                last_kv_cache = self.last_kv_cache

        generate_output = model.generate(
            inputs=inputs,
            attention_mask=torch_ones_like(inputs),
            generation_config=generation_config,
            return_dict_in_generate=True,
            past_key_values=last_kv_cache,
        )

        # save KV cache
        self.last_kv_cache = generate_output.past_key_values

        # Decode full text
        full_text = processor.batch_decode(generate_output.sequences, skip_special_tokens=skip_special_tokens)[0]

        created_at = time.time()
        response_output_item = ResponseOutputMessage(
            id=f"msg_{request_id}",
            type="message",
            status="completed",
            role="assistant",
            content=[ResponseOutputText(type="output_text", text=full_text, annotations=[])],
            annotations=[],
        )
        response_completed = Response(
            id=f"resp_{request_id}",
            created_at=created_at,
            status="completed",
            model=model_id_and_revision,
            instructions=req.get("instructions"),
            text={"format": {"type": "text"}},
            output=[response_output_item],
            object="response",
            tools=[],
            parallel_tool_calls=req.get("parallel_tool_calls", False),
            tool_choice="auto",
            metadata=req.get("metadata"),
        )
        return response_completed.model_dump(exclude_none=True)
def generate_transcription(self, req: dict) -> Generator[str, None, None]:
"""
Generates an OpenAI Transcription using the audio file.
Args:
req (`dict`): The request containing the audio file and model information.
Returns:
`Generator[str, None, None]`: A generator that yields the transcription result.
"""
# TODO: implement streaming transcription (currently, it's not streaming)
if not is_librosa_available():
raise ImportError(
"Missing librosa dependency for audio transcription. Please install with `pip install librosa`"
)
model_id_and_revision = self.process_model_name(req["model"])
audio_model, audio_processor = self.load_audio_model_and_processor(model_id_and_revision)
generation_streamer = TextIteratorStreamer(
audio_processor.tokenizer, skip_special_tokens=True, skip_prompt=True
)
generation_config = create_generation_config_from_req(
req, model_generation_config=audio_model.generation_config
)
# Read the binary audio file using librosa
model_sampling_rate = audio_processor.feature_extractor.sampling_rate
audio_bytes = io.BytesIO(req["file"])
audio_array, _ = librosa.load(audio_bytes, sr=model_sampling_rate, mono=True)
audio_inputs = audio_processor(audio_array, sampling_rate=model_sampling_rate, return_tensors="pt").to(
audio_model.device
)
audio_inputs["input_features"] = audio_inputs["input_features"].to(audio_model.dtype)
generation_kwargs = {
"streamer": generation_streamer,
"generation_config": generation_config,
"return_dict_in_generate": True,
}
def _generate_transcription():
generated_ids = audio_model.generate(**audio_inputs, **generation_kwargs)
transcription_text = audio_processor.batch_decode(generated_ids.sequences, skip_special_tokens=True)[0]
transcription = Transcription(text=transcription_text)
yield f"{transcription.model_dump_json(exclude_none=True)}"
return _generate_transcription()
def is_continuation(self, req: dict) -> bool:
"""
Determines whether the current request is a continuation of the last request. In other words, if it is the
same chat session.
Args:
req (`dict`): The request to check.
Returns:
`True` if the request is a continuation of the last request, `False` otherwise.
"""
messages = req.get("messages") or req.get("input") # ChatCompletion and Response have different fields
req_continues_last_messages = True
# No cached messages: this is a new request
if self.last_messages is None:
req_continues_last_messages = False
# The new request has no new rounds of conversation: this is a new request
elif len(self.last_messages) >= len(messages):
req_continues_last_messages = False
# Otherwise, check that the last messages are a subset of the new request
else:
for i in range(len(self.last_messages)):
if self.last_messages[i] != messages[i]:
req_continues_last_messages = False
break
self.last_messages = messages
return req_continues_last_messages
def get_quantization_config(self) -> Optional[BitsAndBytesConfig]:
"""
Returns the quantization config for the given CLI arguments.
Returns:
`Optional[BitsAndBytesConfig]`: The quantization config.
"""
if self.quantization == "bnb-4bit":
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
)
elif self.quantization == "bnb-8bit":
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
else:
quantization_config = None
if quantization_config is not None:
logger.info(f"Quantization applied with the following config: {quantization_config}")
return quantization_config
def process_model_name(self, model_id: str) -> str:
"""
Applies the `force_model` CLI argument and canonicalizes the model name to the format "model_id@revision".
If the model_id DOESN'T contain an @, it defaults to "model_id@main".
Args:
model_id (`str`): The model ID.
Returns:
`str`: The canonicalized model name to be used
"""
if self.force_model is not None:
model_id = self.force_model
if "@" in model_id:
return model_id
return f"{model_id}@main"
def _load_model_and_data_processor(self, model_id_and_revision: str):
"""
Generic method to load a model and a data processor from a model ID and revision, making use of the serve CLI
arguments.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
model_cls (`type[PreTrainedModel]`):
The model class to load.
Returns:
`tuple[PreTrainedModel, Union[ProcessorMixin, PreTrainedTokenizerFast]]`: The loaded model and
data processor (tokenizer, audio processor, etc.).
"""
import torch
from transformers import AutoConfig, AutoProcessor
logger.info(f"Loading {model_id_and_revision}")
if "@" in model_id_and_revision:
model_id, revision = model_id_and_revision.split("@", 1)
else:
model_id, revision = model_id_and_revision, "main"
data_processor = AutoProcessor.from_pretrained(
model_id,
revision=revision,
trust_remote_code=self.trust_remote_code,
)
dtype = self.dtype if self.dtype in ["auto", None] else getattr(torch, self.dtype)
quantization_config = self.get_quantization_config()
model_kwargs = {
"revision": revision,
"attn_implementation": self.attn_implementation,
"dtype": dtype,
"device_map": self.device,
"trust_remote_code": self.trust_remote_code,
"quantization_config": quantization_config,
}
config = AutoConfig.from_pretrained(model_id, **model_kwargs)
architecture = getattr(transformers, config.architectures[0])
model = architecture.from_pretrained(model_id, **model_kwargs)
has_default_max_length = (
model.generation_config.max_new_tokens is None and model.generation_config.max_length == 20
)
has_short_max_new_tokens = (
model.generation_config.max_new_tokens is not None and model.generation_config.max_new_tokens < 1024
)
if has_default_max_length or has_short_max_new_tokens:
model.generation_config.max_new_tokens = 1024
logger.info(f"Loaded model {model_id_and_revision}")
return model, data_processor
def load_model_and_processor(
self, model_id_and_revision: str
) -> tuple["PreTrainedModel", "PreTrainedTokenizerFast"]:
"""
Loads the text model and processor from the given model ID and revision into the ServeCommand instance.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, PreTrainedTokenizerFast]`: The loaded text model and processor.
"""
if model_id_and_revision not in self.loaded_models or self.loaded_models[model_id_and_revision].is_deleted():
model, processor = self._load_model_and_data_processor(model_id_and_revision)
self.loaded_models[model_id_and_revision] = TimedModel(
model,
timeout_seconds=self.model_timeout,
processor=processor,
)
else:
self.loaded_models[model_id_and_revision].reset_timer()
model = self.loaded_models[model_id_and_revision].model
processor = self.loaded_models[model_id_and_revision].processor
return model, processor
def load_audio_model_and_processor(self, model_id_and_revision: str) -> tuple["PreTrainedModel", "ProcessorMixin"]:
"""
Loads the audio model and processor from the given model ID and revision into the ServeCommand instance.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, ProcessorMixin]`: The loaded audio model and processor.
"""
if model_id_and_revision not in self.loaded_models or self.loaded_models[model_id_and_revision].is_deleted():
audio_model, audio_processor = self._load_model_and_data_processor(model_id_and_revision)
self.loaded_models[model_id_and_revision] = TimedModel(
audio_model,
timeout_seconds=self.model_timeout,
processor=audio_processor,
)
else:
self.loaded_models[model_id_and_revision].reset_timer()
audio_model = self.loaded_models[model_id_and_revision].model
audio_processor = self.loaded_models[model_id_and_revision].processor
return audio_model, audio_processor
# set docstring separately to make it look nice (Typer doesn't play well with the class command)
Serve.__doc__ = """
Run a FastAPI server to serve models on-demand with an OpenAI compatible API.
Models will be loaded and unloaded automatically based on usage and a timeout.
\b
The server will expose the following endpoints:
- POST /v1/chat/completions: Generates chat completions.
- POST /v1/responses: Generates responses.
- POST /v1/audio/transcriptions: Generates transcriptions from audio.
- GET /v1/models: Lists available models for 3rd party tools.
Requires FastAPI and Uvicorn to be installed.
"""
if __name__ == "__main__":
serve = Serve()
|
Serve
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/legacy_row_conditions.py
|
{
"start": 2142,
"end": 2770
}
|
class ____(enum.Enum):
"""Type of condition or parser to be used to interpret a RowCondition
Note that many of these are forward looking and are not yet implemented.
In the future this enum can be used internally
instead of strings for the condition_parser user input.
"""
GE = "ge" # GE intermediate language
SPARK = "spark" # Spark pyspark.sql.Column type
SPARK_SQL = "spark_sql" # String type
PANDAS = "pandas" # pandas parser for pandas DataFrame.query()
PYTHON = "python" # python parser for DataFrame.query()
SQL = "sql" # Selectable type
@dataclass
|
RowConditionParserType
|
python
|
django__django
|
tests/db_functions/math/test_cos.py
|
{
"start": 268,
"end": 2270
}
|
class ____(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_cos=Cos("normal")).first()
self.assertIsNone(obj.null_cos)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("-12.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(n1_cos=Cos("n1"), n2_cos=Cos("n2")).first()
self.assertIsInstance(obj.n1_cos, Decimal)
self.assertIsInstance(obj.n2_cos, Decimal)
self.assertAlmostEqual(obj.n1_cos, Decimal(math.cos(obj.n1)))
self.assertAlmostEqual(obj.n2_cos, Decimal(math.cos(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_cos=Cos("f1"), f2_cos=Cos("f2")).first()
self.assertIsInstance(obj.f1_cos, float)
self.assertIsInstance(obj.f2_cos, float)
self.assertAlmostEqual(obj.f1_cos, math.cos(obj.f1))
self.assertAlmostEqual(obj.f2_cos, math.cos(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_cos=Cos("small"),
normal_cos=Cos("normal"),
big_cos=Cos("big"),
).first()
self.assertIsInstance(obj.small_cos, float)
self.assertIsInstance(obj.normal_cos, float)
self.assertIsInstance(obj.big_cos, float)
self.assertAlmostEqual(obj.small_cos, math.cos(obj.small))
self.assertAlmostEqual(obj.normal_cos, math.cos(obj.normal))
self.assertAlmostEqual(obj.big_cos, math.cos(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Cos):
DecimalModel.objects.create(n1=Decimal("-8.0"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("3.14"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__cos__gt=-0.2).get()
self.assertEqual(obj.n1, Decimal("-8.0"))
|
CosTests
|
python
|
huggingface__transformers
|
src/transformers/models/llava_next_video/modeling_llava_next_video.py
|
{
"start": 6759,
"end": 7740
}
|
class ____(nn.Module):
def __init__(self, config: LlavaNextVideoConfig):
super().__init__()
# We have hidden_size * the number of vision feature layers
num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * num_feature_layers,
config.text_config.hidden_size,
bias=config.multimodal_projector_bias,
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
)
def forward(self, image_features):
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
@auto_docstring
|
LlavaNextVideoMultiModalProjector
|
python
|
PyCQA__pylint
|
tests/functional/t/too/too_many_ancestors.py
|
{
"start": 292,
"end": 385
}
|
class ____(Aaaa, Bbbb, Cccc, Dddd, Eeee, Ffff, Gggg, Hhhh): # [too-many-ancestors]
pass
|
Iiii
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-tplcentral/source_tplcentral/streams.py
|
{
"start": 3445,
"end": 4715
}
|
class ____(TplcentralStream, ABC):
state_checkpoint_interval = 100
cursor_field = "_cursor"
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
current = current_stream_state.get(self.cursor_field, "")
latest = latest_record.get(self.cursor_field, "")
if current and latest:
return {self.cursor_field: max(arrow.get(latest), arrow.get(current)).datetime.replace(tzinfo=None).isoformat()}
return {self.cursor_field: max(latest, current)}
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
if stream_state is None:
stream_state = {}
return [{self.cursor_field: stream_state.get(self.cursor_field, self.start_date)}]
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = super().request_params(
stream_state=stream_state,
stream_slice=stream_slice,
next_page_token=next_page_token,
)
return params or {}
|
IncrementalTplcentralStream
|
python
|
joke2k__faker
|
faker/providers/person/fr_CH/__init__.py
|
{
"start": 44,
"end": 6954
}
|
class ____(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
)
formats = formats_male + formats_female
# source:
# http://www.bfs.admin.ch/bfs/portal/fr/index/news/publikationen.html?publicationID=6704
first_names_male = [
"Alain",
"Albert",
"Alexandre",
"André",
"Antonio",
"Arthur",
"Bernard",
"Bruno",
"Charles",
"Christian",
"Christophe",
"Claude",
"Daniel",
"David",
"Eric",
"Ethan",
"Florian",
"François",
"Frédéric",
"Gabriel",
"Georges",
"Gilbert",
"Guillaume",
"Gérard",
"Henri",
"Hugo",
"Jacques",
"Jean",
"Jean-Claude",
"Jean-Pierre",
"Jonathan",
"José",
"Julien",
"Kevin",
"Laurent",
"Louis",
"Loïc",
"Luca",
"Lucas",
"Léo",
"Manuel",
"Marcel",
"Mathieu",
"Matteo",
"Maurice",
"Maxime",
"Michael",
"Michel",
"Nathan",
"Nicolas",
"Noah",
"Nolan",
"Olivier",
"Pascal",
"Patrick",
"Paul",
"Philippe",
"Pierre",
"Raymond",
"René",
"Robert",
"Roger",
"Roland",
"Romain",
"Samuel",
"Stéphane",
"Sébastien",
"Thierry",
"Thomas",
"Théo",
"Vincent",
]
first_names_female = [
"Alice",
"Alicia",
"Ana",
"Anna",
"Anne",
"Aurélie",
"Camille",
"Caroline",
"Catherine",
"Chantal",
"Charlotte",
"Chloé",
"Christiane",
"Christine",
"Clara",
"Claudine",
"Corinne",
"Céline",
"Danielle",
"Denise",
"Eliane",
"Elisa",
"Elisabeth",
"Elodie",
"Emilie",
"Emma",
"Eva",
"Fabienne",
"Françoise",
"Georgette",
"Germaine",
"Hélène",
"Isabelle",
"Jacqueline",
"Jeanne",
"Jessica",
"Josiane",
"Julie",
"Laetitia",
"Lara",
"Laura",
"Laurence",
"Liliane",
"Lisa",
"Lucie",
"Léa",
"Madeleine",
"Manon",
"Marcelle",
"Marguerite",
"Maria",
"Marianne",
"Marie",
"Mathilde",
"Monique",
"Mélanie",
"Nathalie",
"Nelly",
"Nicole",
"Odette",
"Patricia",
"Sandra",
"Sandrine",
"Sara",
"Sarah",
"Simone",
"Sophie",
"Stéphanie",
"Suzanne",
"Sylvie",
"Thérèse",
"Valérie",
"Vanessa",
"Véronique",
"Yvette",
"Yvonne",
"Zoé",
]
first_names = first_names_male + first_names_female
# source = http://kunden.eye.ch/swissgen/rsr/index.html
last_names = [
"Aebi",
"Aeby",
"Alber",
"Babey",
"Badan",
"Badel",
"Bahon",
"Balmat",
"Barbey",
"Barillon",
"Barman",
"Bavaud",
"Beguin",
"Berberat",
"Bernasconi",
"Besançon",
"Besençon",
"Besse",
"Beuchat",
"Beuret",
"Beurret",
"Blanc",
"Bochud",
"Boechat",
"Boichat",
"Boillat",
"Bonvin",
"Bonvini",
"Botteron",
"Bourquard",
"Bourquin",
"Bouvier",
"Bovet",
"Brahier",
"Brandt",
"Broquet",
"Bugnon",
"Bujard",
"Béguelin",
"Candaux",
"Carraud",
"Carraux",
"Carron",
"Cattin",
"Chappuis",
"Chapuis",
"Charpié",
"Chatriand",
"Chatriant",
"Chaudet",
"Chenaux",
"Chevalley",
"Chevrolet",
"Chopard",
"Coigny",
"Comman",
"Comment",
"Comte",
"Conrad",
"Corbat",
"Corboz",
"Cornut",
"Cornuz",
"Corpataux",
"Cosandey",
"Cosendey",
"Cossy",
"Courvoisier",
"Cousin",
"Cretton",
"Crevoisier",
"Crivelli",
"Curdy",
"de Dardel",
"Deladoëy",
"Delèze",
"Deshusses",
"Diesbach",
"Droz",
"Dubey",
"Duroux",
"Duvanel",
"Délèze",
"Evéquoz",
"Fonjallaz",
"Francillon",
"Galland",
"Georges",
"Gillièron",
"Gilliéron",
"Godet",
"Grand",
"Grojean",
"Grosjean",
"Gubéran",
"Humbert",
"Isella",
"Jacot-Descombes",
"Jacot-Guillarmod",
"Joly",
"Jomini",
"Joye",
"Julliard",
"Maire",
"Marti",
"Martin",
"Marty",
"Masseron",
"Matile",
"Mayor",
"Menthonnex",
"Mercier",
"Meyer",
"Monnard",
"Monnet",
"Monnet",
"Monney",
"Montandon",
"Morand",
"Morard",
"Mottet",
"Mottiez",
"Muriset",
"Musy",
"Müller",
"Niquille",
"Nusslé",
"Nüsslin",
"Paccot",
"Pachoud",
"Paschoud",
"Pasquier",
"Peitrequin",
"Pellet",
"Piccand",
"Polla",
"Privet",
"Quartier",
"Rapin",
"Rappaz",
"Rapraz",
"Rey",
"Robadey",
"Robert",
"Romanens",
"Rosselat",
"Rosselet",
"Rossellat",
"Sandoz",
"Sansonnens",
"Saudan",
"Thorens",
"Théraulaz",
"Tinguely",
"Treboux",
"Uldry",
"Vallélian",
"Vermeil",
"Vienne",
"Vonlanthen",
"Vuille",
"Wicht",
]
|
Provider
|
python
|
MorvanZhou__Reinforcement-learning-with-tensorflow
|
experiments/Robot_arm/DPPO.py
|
{
"start": 4554,
"end": 8028
}
|
class ____(object):
def __init__(self, wid):
self.wid = wid
self.env = ArmEnv(mode=MODE[n_model])
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer
a = self.ppo.choose_action(s)
s_, r, done = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r) # normalize reward, find to be useful
s = s_
ep_r += r
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br)))
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPO()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # no update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue()
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start()
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and testing
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode'); plt.ylabel('Moving reward'); plt.ion(); plt.show()
env.set_fps(30)
while True:
s = env.reset()
for t in range(400):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0]
|
Worker
|
python
|
getsentry__sentry
|
src/sudo/middleware.py
|
{
"start": 459,
"end": 2470
}
|
class ____(MiddlewareMixin):
"""
Middleware that contributes ``request.is_sudo()`` and sets the required
cookie for sudo mode to work correctly.
"""
def has_sudo_privileges(self, request: HttpRequest) -> bool:
# Override me to alter behavior
return has_sudo_privileges(request)
def process_request(self, request: HttpRequest) -> None:
assert hasattr(request, "session"), (
"The Sudo middleware requires session middleware to be installed."
"Edit your MIDDLEWARE setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'sudo.middleware.SudoMiddleware'."
)
request.is_sudo = lambda: self.has_sudo_privileges(request)
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
is_sudo = getattr(request, "_sudo", None)
if is_sudo is None:
return response
# We have explicitly had sudo revoked, so clean up cookie
if is_sudo is False and COOKIE_NAME in request.COOKIES:
response.delete_cookie(COOKIE_NAME)
return response
# Sudo mode has been granted,
# and we have a token to send back to the user agent
if (
is_sudo is True
and hasattr(request, "_sudo_token")
and hasattr(request, "_sudo_max_age")
):
token = request._sudo_token
max_age = request._sudo_max_age
response.set_signed_cookie(
COOKIE_NAME,
token,
salt=COOKIE_SALT,
max_age=max_age, # If max_age is None, it's a session cookie
secure=request.is_secure() if COOKIE_SECURE is None else COOKIE_SECURE,
httponly=COOKIE_HTTPONLY, # Not accessible by JavaScript
path=COOKIE_PATH,
domain=COOKIE_DOMAIN,
)
return response
|
SudoMiddleware
|
python
|
facebook__pyre-check
|
tools/upgrade/commands/consolidate_nested_configurations.py
|
{
"start": 1189,
"end": 5278
}
|
class ____(ErrorSuppressingCommand):
def __init__(
self,
command_arguments: CommandArguments,
*,
repository: Repository,
subdirectory: Optional[str],
) -> None:
super().__init__(command_arguments, repository)
self._subdirectory: Final[Optional[str]] = subdirectory
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "ConsolidateNestedConfigurations":
command_arguments = CommandArguments.from_arguments(arguments)
return ConsolidateNestedConfigurations(
command_arguments,
repository=repository,
subdirectory=arguments.subdirectory,
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(ConsolidateNestedConfigurations, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument("--subdirectory")
@staticmethod
def gather_nested_configuration_mapping(
configurations: List[str],
) -> Dict[str, List[str]]:
nested_configurations = {}
for configuration in configurations:
if len(nested_configurations) == 0:
nested_configurations[configuration] = []
continue
inserted = False
for topmost_configuration in nested_configurations.keys():
existing = topmost_configuration.replace(
".pyre_configuration.local", ""
)
current = configuration.replace(".pyre_configuration.local", "")
if current.startswith(existing):
nested_configurations[topmost_configuration].append(configuration)
inserted = True
break
elif existing.startswith(current):
nested_configurations[configuration] = nested_configurations[
topmost_configuration
] + [topmost_configuration]
del nested_configurations[topmost_configuration]
inserted = True
break
if not inserted:
nested_configurations[configuration] = []
return nested_configurations
@override
def run(self) -> None:
subdirectory = self._subdirectory
subdirectory = Path(subdirectory) if subdirectory else Path.cwd()
# Find configurations
configurations = sorted(find_files(subdirectory, ".pyre_configuration.local"))
if not configurations:
LOG.warning(
f"Skipping consolidation. No configurations found in {subdirectory}"
)
return
if len(configurations) == 1:
configuration = configurations[0]
LOG.warning(
f"Skipping consolidation. Only one configuration found: {configuration}"
)
return
# Gather nesting structure of configurations
nested_configurations = self.gather_nested_configuration_mapping(configurations)
if all(len(nested) == 0 for nested in nested_configurations.values()):
LOG.warning(
"Skipping consolidation. None of the configurations found are nested."
)
return
# Consolidate targets
for topmost, nested in nested_configurations.items():
if len(nested) == 0:
continue
consolidate_nested(
self._repository,
Path(topmost),
[Path(configuration) for configuration in nested],
)
configuration = Configuration(Path(topmost))
self._get_and_suppress_errors(configuration)
self._repository.commit_changes(
commit=(not self._no_commit),
title=f"Consolidate configurations in {subdirectory}",
summary="Consolidating nested configurations.",
set_dependencies=False,
)
|
ConsolidateNestedConfigurations
|
python
|
airbytehq__airbyte
|
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/tests/test_core.py
|
{
"start": 73003,
"end": 78555
}
|
class ____(BaseTest):
# Override from BaseTest!
# Used so that this is not part of the mandatory high strictness test suite yet
MANDATORY_FOR_TEST_STRICTNESS_LEVELS = []
@pytest.fixture(name="operational_certification_test")
async def operational_certification_test_fixture(self, is_connector_certified: bool) -> bool:
"""
Fixture that is used to skip a test that is reserved only for connectors that are supposed to be tested
against operational certification criteria
"""
if not is_connector_certified:
pytest.skip("Skipping operational connector certification test for uncertified connector")
return True
@pytest.fixture(name="streams_without_primary_key")
def streams_without_primary_key_fixture(self, inputs: ConnectorAttributesConfig) -> List[NoPrimaryKeyConfiguration]:
return inputs.streams_without_primary_key or []
async def test_streams_define_primary_key(
self, operational_certification_test, streams_without_primary_key, connector_config, docker_runner: ConnectorRunner
) -> None:
output = await docker_runner.call_discover(config=connector_config)
catalog_messages = filter_output(output, Type.CATALOG)
streams = catalog_messages[0].catalog.streams
discovered_streams_without_primary_key = {stream.name for stream in streams if not stream.source_defined_primary_key}
missing_primary_keys = discovered_streams_without_primary_key - {stream.name for stream in streams_without_primary_key}
quoted_missing_primary_keys = {f"'{primary_key}'" for primary_key in missing_primary_keys}
assert not missing_primary_keys, f"The following streams {', '.join(quoted_missing_primary_keys)} do not define a primary_key"
@pytest.fixture(name="allowed_hosts_test")
def allowed_hosts_fixture_test(self, inputs: ConnectorAttributesConfig) -> bool:
allowed_hosts = inputs.allowed_hosts
bypass_reason = allowed_hosts.bypass_reason if allowed_hosts else None
if bypass_reason:
pytest.skip(f"Skipping `metadata.allowedHosts` checks. Reason: {bypass_reason}")
return True
async def test_certified_connector_has_allowed_hosts(
self, operational_certification_test, allowed_hosts_test, connector_metadata: dict
) -> None:
"""
Checks whether or not the connector has `allowedHosts` and it's components defined in `metadata.yaml`.
Suitable for certified connectors starting `ql` >= 400.
Arguments:
:: operational_certification_test -- pytest.fixure defines the connector is suitable for this test or not.
:: connector_metadata -- `metadata.yaml` file content
"""
metadata = connector_metadata.get("data", {})
has_allowed_hosts_property = "allowedHosts" in metadata.keys()
assert has_allowed_hosts_property, f"The `allowedHosts` property is missing in `metadata.data` for `metadata.yaml`."
allowed_hosts = metadata.get("allowedHosts", {})
has_hosts_property = "hosts" in allowed_hosts.keys() if allowed_hosts else False
assert has_hosts_property, f"The `hosts` property is missing in `metadata.data.allowedHosts` for `metadata.yaml`."
hosts = allowed_hosts.get("hosts", [])
has_assigned_hosts = len(hosts) > 0 if hosts else False
assert (
has_assigned_hosts
), f"The `hosts` empty list is not allowed for `metadata.data.allowedHosts` for certified connectors. Please add `hosts` or define the `allowed_hosts.bypass_reason` in `acceptance-test-config.yaml`."
@pytest.fixture(name="suggested_streams_test")
def suggested_streams_fixture_test(self, inputs: ConnectorAttributesConfig) -> bool:
suggested_streams = inputs.suggested_streams
bypass_reason = suggested_streams.bypass_reason if suggested_streams else None
if bypass_reason:
pytest.skip(f"Skipping `metadata.suggestedStreams` checks. Reason: {bypass_reason}")
return True
async def test_certified_connector_has_suggested_streams(
self, operational_certification_test, suggested_streams_test, connector_metadata: dict
) -> None:
"""
Checks whether or not the connector has `suggestedStreams` and it's components defined in `metadata.yaml`.
Suitable for certified connectors starting `ql` >= 400.
Arguments:
:: operational_certification_test -- pytest.fixure defines the connector is suitable for this test or not.
:: connector_metadata -- `metadata.yaml` file content
"""
metadata = connector_metadata.get("data", {})
has_suggested_streams_property = "suggestedStreams" in metadata.keys()
assert has_suggested_streams_property, f"The `suggestedStreams` property is missing in `metadata.data` for `metadata.yaml`."
suggested_streams = metadata.get("suggestedStreams", {})
has_streams_property = "streams" in suggested_streams.keys() if suggested_streams else False
assert has_streams_property, f"The `streams` property is missing in `metadata.data.suggestedStreams` for `metadata.yaml`."
streams = suggested_streams.get("streams", [])
has_assigned_suggested_streams = len(streams) > 0 if streams else False
assert (
has_assigned_suggested_streams
), f"The `streams` empty list is not allowed for `metadata.data.suggestedStreams` for certified connectors."
|
TestConnectorAttributes
|
python
|
wandb__wandb
|
wandb/vendor/pygments/filters/__init__.py
|
{
"start": 6019,
"end": 9314
}
|
class ____(Filter):
"""Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
If this is a one-character string, spaces will be replaces by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', u'·'),
('tabs', u'»'),
('newlines', u'¶')]:
opt = options.get(name, False)
if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' ' * (tabsize - 1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or u' '
tabs = self.tabs or u'\t'
newlines = self.newlines or u'\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
|
VisibleWhitespaceFilter
|
python
|
realpython__materials
|
python-flask-example-heroku/config.py
|
{
"start": 229,
"end": 302
}
|
class ____(Config):
DEBUG = True
DEVELOPMENT = True
|
DevelopmentConfig
|
python
|
google__jax
|
jax/experimental/_private_mm/mini_dime.py
|
{
"start": 2377,
"end": 3736
}
|
class ____(tuple[jax.Device, ...]):
def __new__(cls, *args):
return super().__new__(cls, sorted(set(args), key=lambda d: d.id))
@cached_property
def ranks(self):
return OrderedDict((d, idx) for idx, d in enumerate(self))
@property
def leader(self):
return self[0]
@cached_property
def key(self) -> str:
return ",".join(str(d.id) for d in self)
local_comms: dict = {}
def get_or_create_comm(devs: UniqueDevices):
TIMEOUT = 5_000
comm = local_comms.get(devs)
my_process_index = jax.process_index()
if comm is None:
if devs.leader.process_index == my_process_index:
nccl_id = nccl.get_unique_id()
get_distributed_client().key_value_set_bytes(
devs.key, pickle.dumps(nccl_id)
)
else:
nccl_id = get_distributed_client().blocking_key_value_get_bytes(
devs.key, TIMEOUT
)
nccl_id = pickle.loads(nccl_id)
nccl.groupStart()
for d in devs:
if d.process_index == my_process_index:
with cupy.cuda.Device(d.local_hardware_id):
comm = nccl.NcclCommunicator(len(devs), nccl_id, devs.ranks[d])
nccl.groupEnd()
local_comms[devs] = comm
return comm
local_streams: dict = {}
|
UniqueDevices
|
python
|
crytic__slither
|
slither/detectors/statements/tautological_compare.py
|
{
"start": 306,
"end": 2249
}
|
class ____(AbstractDetector):
"""
Same variable comparison detector
"""
ARGUMENT = "tautological-compare"
HELP = "Comparing a variable to itself always returns true or false, depending on comparison"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#tautological-compare"
WIKI_TITLE = "Tautological compare"
WIKI_DESCRIPTION = "A variable compared to itself is probably an error as it will always return `true` for `==`, `>=`, `<=` and always `false` for `<`, `>` and `!=`."
WIKI_EXPLOIT_SCENARIO = """
```solidity
function check(uint a) external returns(bool){
return (a >= a);
}
```
`check` always return true."""
WIKI_RECOMMENDATION = "Remove comparison or compare to different value."
def _check_function(self, f: Function) -> List[Output]:
affected_nodes = set()
for node in f.nodes:
for ir in node.irs:
if isinstance(ir, Binary):
if ir.type in [
BinaryType.GREATER,
BinaryType.GREATER_EQUAL,
BinaryType.LESS,
BinaryType.LESS_EQUAL,
BinaryType.EQUAL,
BinaryType.NOT_EQUAL,
]:
if ir.variable_left == ir.variable_right:
affected_nodes.add(node)
results = []
for n in affected_nodes:
info: DETECTOR_INFO = [f, " compares a variable to itself:\n\t", n, "\n"]
res = self.generate_result(info)
results.append(res)
return results
def _detect(self):
results = []
for f in self.compilation_unit.functions_and_modifiers:
results.extend(self._check_function(f))
return results
|
TautologicalCompare
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/monitoring.py
|
{
"start": 7854,
"end": 8666
}
|
class ____(Metric):
"""A stateful class for updating a gauge-like integer metric.
This class encapsulates a set of integer values (or a single value for a
label-less metric). Each value is identified by a tuple of labels. The class
allows the user to set each value.
"""
__slots__ = []
def __init__(self, name, description, *labels):
"""Creates a new IntGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels),
name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return IntGaugeCell(super(IntGauge, self).get_cell(*labels))
|
IntGauge
|
python
|
crytic__slither
|
slither/tools/mutator/mutators/CR.py
|
{
"start": 214,
"end": 1457
}
|
class ____(AbstractMutator): # pylint: disable=too-few-public-methods
NAME = "CR"
HELP = "Comment Replacement"
def _mutate(self) -> Dict:
result: Dict = {}
for ( # pylint: disable=too-many-nested-blocks
function
) in self.contract.functions_and_modifiers_declared:
for node in function.nodes:
if not self.should_mutate_node(node):
continue
if node.type not in (
NodeType.ENTRYPOINT,
NodeType.ENDIF,
NodeType.ENDLOOP,
):
# Get the string
start = node.source_mapping.start
stop = start + node.source_mapping.length
old_str = node.source_mapping.content
line_no = node.source_mapping.lines
new_str = "//" + old_str
create_patch_with_line(
result,
self.in_file,
start,
stop,
old_str,
new_str,
line_no[0],
)
return result
|
CR
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/_decorator.py
|
{
"start": 1980,
"end": 2347
}
|
class ____:
prev: bool
def __init__(self) -> None:
global _determinism
self.prev = _determinism
_determinism = True
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
global _determinism
_determinism = self.prev
|
guaranteed_datapipes_determinism
|
python
|
python-jsonschema__jsonschema
|
jsonschema/protocols.py
|
{
"start": 1360,
"end": 7198
}
|
class ____(Protocol):
"""
The protocol to which all validator classes adhere.
Arguments:
schema:
The schema that the validator object will validate with.
It is assumed to be valid, and providing
an invalid schema can lead to undefined behavior. See
`Validator.check_schema` to validate a schema first.
registry:
a schema registry that will be used for looking up JSON references
resolver:
a resolver that will be used to resolve :kw:`$ref`
properties (JSON references). If unprovided, one will be created.
.. deprecated:: v4.18.0
`RefResolver <_RefResolver>` has been deprecated in favor of
`referencing`, and with it, this argument.
format_checker:
if provided, a checker which will be used to assert about
:kw:`format` properties present in the schema. If unprovided,
*no* format validation is done, and the presence of format
within schemas is strictly informational. Certain formats
require additional packages to be installed in order to assert
against instances. Ensure you've installed `jsonschema` with
its `extra (optional) dependencies <index:extras>` when
invoking ``pip``.
.. deprecated:: v4.12.0
Subclassing validator classes now explicitly warns this is not part of
their public API.
"""
#: An object representing the validator's meta schema (the schema that
#: describes valid schemas in the given version).
META_SCHEMA: ClassVar[Mapping]
#: A mapping of validation keywords (`str`\s) to functions that
#: validate the keyword with that name. For more information see
#: `creating-validators`.
VALIDATORS: ClassVar[Mapping]
#: A `jsonschema.TypeChecker` that will be used when validating
#: :kw:`type` keywords in JSON schemas.
TYPE_CHECKER: ClassVar[jsonschema.TypeChecker]
#: A `jsonschema.FormatChecker` that will be used when validating
#: :kw:`format` keywords in JSON schemas.
FORMAT_CHECKER: ClassVar[jsonschema.FormatChecker]
#: A function which given a schema returns its ID.
ID_OF: _typing.id_of
#: The schema that will be used to validate instances
schema: Mapping | bool
def __init__(
self,
schema: Mapping | bool,
resolver: Any = None, # deprecated
format_checker: jsonschema.FormatChecker | None = None,
*,
registry: referencing.jsonschema.SchemaRegistry = ...,
) -> None: ...
@classmethod
def check_schema(cls, schema: Mapping | bool) -> None:
"""
Validate the given schema against the validator's `META_SCHEMA`.
Raises:
`jsonschema.exceptions.SchemaError`:
if the schema is invalid
"""
def is_type(self, instance: Any, type: str) -> bool:
"""
Check if the instance is of the given (JSON Schema) type.
Arguments:
instance:
the value to check
type:
the name of a known (JSON Schema) type
Returns:
whether the instance is of the given type
Raises:
`jsonschema.exceptions.UnknownType`:
if ``type`` is not a known type
"""
def is_valid(self, instance: Any) -> bool:
"""
Check if the instance is valid under the current `schema`.
Returns:
whether the instance is valid or not
>>> schema = {"maxItems" : 2}
>>> Draft202012Validator(schema).is_valid([2, 3, 4])
False
"""
def iter_errors(self, instance: Any) -> Iterable[ValidationError]:
r"""
Lazily yield each of the validation errors in the given instance.
>>> schema = {
... "type" : "array",
... "items" : {"enum" : [1, 2, 3]},
... "maxItems" : 2,
... }
>>> v = Draft202012Validator(schema)
>>> for error in sorted(v.iter_errors([2, 3, 4]), key=str):
... print(error.message)
4 is not one of [1, 2, 3]
[2, 3, 4] is too long
.. deprecated:: v4.0.0
Calling this function with a second schema argument is deprecated.
Use `Validator.evolve` instead.
"""
def validate(self, instance: Any) -> None:
"""
Check if the instance is valid under the current `schema`.
Raises:
`jsonschema.exceptions.ValidationError`:
if the instance is invalid
>>> schema = {"maxItems" : 2}
>>> Draft202012Validator(schema).validate([2, 3, 4])
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
"""
def evolve(self, **kwargs) -> Validator:
"""
Create a new validator like this one, but with given changes.
Preserves all other attributes, so can be used to e.g. create a
validator with a different schema but with the same :kw:`$ref`
resolution behavior.
>>> validator = Draft202012Validator({})
>>> validator.evolve(schema={"type": "number"})
Draft202012Validator(schema={'type': 'number'}, format_checker=None)
The returned object satisfies the validator protocol, but may not
be of the same concrete class! In particular this occurs
when a :kw:`$ref` occurs to a schema with a different
:kw:`$schema` than this one (i.e. for a different draft).
>>> validator.evolve(
... schema={"$schema": Draft7Validator.META_SCHEMA["$id"]}
... )
Draft7Validator(schema=..., format_checker=None)
"""
|
Validator
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/callbacks/early_stopping.py
|
{
"start": 1133,
"end": 1341
}
|
class ____(Enum):
"""Enum for early stopping reasons."""
NOT_STOPPED = 0
STOPPING_THRESHOLD = 1
DIVERGENCE_THRESHOLD = 2
PATIENCE_EXHAUSTED = 3
NON_FINITE_METRIC = 4
|
EarlyStoppingReason
|
python
|
django__django
|
tests/admin_scripts/tests.py
|
{
"start": 115353,
"end": 121092
}
|
class ____(AdminScriptTestCase):
def test_invalid_name(self):
"""startapp validates that app name is a valid Python identifier."""
for bad_name in ("7testproject", "../testproject"):
with self.subTest(app_name=bad_name):
args = ["startapp", bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: '{}' is not a valid app name. Please make "
"sure the name is a valid identifier.".format(bad_name),
)
self.assertFalse(os.path.exists(testproject_dir))
def test_importable_name(self):
"""
startapp validates that app name doesn't clash with existing Python
modules.
"""
bad_name = "os"
args = ["startapp", bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: 'os' conflicts with the name of an existing "
"Python module and cannot be used as an app name. Please try "
"another name.",
)
self.assertFalse(os.path.exists(testproject_dir))
def test_invalid_target_name(self):
for bad_target in (
"invalid.dir_name",
"7invalid_dir_name",
".invalid_dir_name",
):
with self.subTest(bad_target):
_, err = self.run_django_admin(["startapp", "app", bad_target])
self.assertOutput(
err,
"CommandError: '%s' is not a valid app directory. Please "
"make sure the directory is a valid identifier." % bad_target,
)
def test_importable_target_name(self):
_, err = self.run_django_admin(["startapp", "app", "os"])
self.assertOutput(
err,
"CommandError: 'os' conflicts with the name of an existing Python "
"module and cannot be used as an app directory. Please try "
"another directory.",
)
def test_trailing_slash_in_target_app_directory_name(self):
app_dir = os.path.join(self.test_dir, "apps", "app1")
os.makedirs(app_dir)
_, err = self.run_django_admin(
["startapp", "app", os.path.join("apps", "app1", "")]
)
self.assertNoOutput(err)
self.assertIs(os.path.exists(os.path.join(app_dir, "apps.py")), True)
def test_overlaying_app(self):
# Use a subdirectory so it is outside the PYTHONPATH.
os.makedirs(os.path.join(self.test_dir, "apps/app1"))
self.run_django_admin(["startapp", "app1", "apps/app1"])
out, err = self.run_django_admin(["startapp", "app2", "apps/app1"])
self.assertOutput(
err,
"already exists. Overlaying an app into an existing directory "
"won't replace conflicting files.",
)
def test_template(self):
out, err = self.run_django_admin(["startapp", "new_app"])
self.assertNoOutput(err)
app_path = os.path.join(self.test_dir, "new_app")
self.assertIs(os.path.exists(app_path), True)
with open(os.path.join(app_path, "apps.py")) as f:
content = f.read()
self.assertIn("class NewAppConfig(AppConfig)", content)
self.assertInAfterFormatting("name = 'new_app'", content)
def test_creates_directory_when_custom_app_destination_missing(self):
args = [
"startapp",
"my_app",
"my_app",
]
testapp_dir = os.path.join(self.test_dir, "my_app")
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(testapp_dir))
def test_custom_app_destination_missing_with_nested_subdirectory(self):
args = [
"startapp",
"my_app",
"apps/my_app",
]
testapp_dir = os.path.join(self.test_dir, "apps", "my_app")
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(testapp_dir))
def test_custom_name_with_app_within_other_app(self):
parent_app_dir = os.path.join(self.test_dir, "parent")
self.run_django_admin(["startapp", "parent"])
self.assertTrue(os.path.exists(parent_app_dir))
nested_args = ["startapp", "child", "parent/child"]
child_app_dir = os.path.join(self.test_dir, "parent", "child")
out, err = self.run_django_admin(nested_args)
self.assertNoOutput(out)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(child_app_dir))
@unittest.skipIf(
sys.platform == "win32",
"Windows only partially supports umasks and chmod.",
)
def test_custom_app_directory_creation_error_handling(self):
"""The error is displayed to the user in case of OSError."""
args = [
"startapp",
"my_app",
"project_dir/my_app",
]
# Create a read-only parent directory.
os.makedirs(
os.path.join(self.test_dir, "project_dir"), exist_ok=True, mode=0o200
)
testapp_dir = os.path.join(self.test_dir, "project_dir", "my_app")
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(
err,
"Permission denied",
)
self.assertFalse(os.path.exists(testapp_dir))
|
StartApp
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/postgresql/test_types.py
|
{
"start": 203332,
"end": 203444
}
|
class ____(
_DateTimeMultiRangeTests, _MultiRangeTypeCompilation
):
pass
|
DateTimeMultiRangeCompilationTest
|
python
|
Farama-Foundation__Gymnasium
|
tests/envs/mujoco/test_mujoco_custom_env.py
|
{
"start": 291,
"end": 4040
}
|
class ____(MujocoEnv, utils.EzPickle):
"""
A simple mujoco env to test third party mujoco env, using the `Gymnasium.MujocoEnv` environment API.
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
}
def __init__(self, xml_file="point.xml", frame_skip=1, **kwargs):
utils.EzPickle.__init__(self, xml_file, frame_skip, **kwargs)
MujocoEnv.__init__(
self,
xml_file,
frame_skip=frame_skip,
observation_space=None, # needs to be defined after
default_camera_config={},
**kwargs,
)
self.metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": int(np.round(1.0 / self.dt)),
}
obs_size = self.data.qpos.size + self.data.qvel.size
self.observation_space = Box(
low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float64
)
def step(self, action):
x_position_before = self.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.data.qpos[0]
observation = self._get_obs()
reward = x_position_after - x_position_before
info = {}
if self.render_mode == "human":
self.render()
return observation, reward, False, False, info
def _get_obs(self):
position = self.data.qpos.flat.copy()
velocity = self.data.qvel.flat.copy()
return np.concatenate((position, velocity))
def reset_model(self):
qpos = self.init_qpos
qvel = self.init_qvel
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def _get_reset_info(self):
return {"works": True}
CHECK_ENV_IGNORE_WARNINGS = [
f"\x1b[33mWARN: {message}\x1b[0m"
for message in [
"A Box observation space minimum value is -infinity. This is probably too low.",
"A Box observation space maximum value is infinity. This is probably too high.",
"For Box action spaces, we recommend using a symmetric and normalized space (range=[-1, 1] or [0, 1]). See https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html for more information.",
]
]
@pytest.mark.parametrize("frame_skip", [1, 2, 3, 4, 5])
def test_frame_skip(frame_skip):
"""verify that custom envs work with different `frame_skip` values"""
env = PointEnv(frame_skip=frame_skip)
# Test if env adheres to Gym API
with warnings.catch_warnings(record=True) as w:
check_env(env.unwrapped, skip_render_check=True)
env.close()
for warning in w:
if warning.message.args[0] not in CHECK_ENV_IGNORE_WARNINGS:
raise Error(f"Unexpected warning: {warning.message}")
def test_xml_file():
"""Verify that the loading of a custom XML file works"""
relative_path = "./tests/envs/mujoco/assets/walker2d_v5_uneven_feet.xml"
env = PointEnv(xml_file=relative_path).unwrapped
assert isinstance(env, MujocoEnv)
assert env.data.qpos.size == 9
full_path = os.getcwd() + "/tests/envs/mujoco/assets/walker2d_v5_uneven_feet.xml"
env = PointEnv(xml_file=full_path).unwrapped
assert isinstance(env, MujocoEnv)
assert env.data.qpos.size == 9
# note can not test user home path (with '~') because github CI does not have a home folder
def test_reset_info():
"""Verify that the environment returns info at `reset()`"""
env = PointEnv()
_, info = env.reset()
assert info["works"] is True
|
PointEnv
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/models.py
|
{
"start": 28252,
"end": 39379
}
|
class ____(Request):
"""
Create a new model not associated with a task
:param uri: URI for the model
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Case insensitive.
Should be identical to the framework of the task which created the model.
:type framework: str
:param design: Json[d] object representing the model design. Should be
identical to the network design of the task which created the model
:type design: dict
:param labels: Json object
:type labels: dict
:param ready: Indication if the model is final and can be used by other tasks.
Default is false.
:type ready: bool
:param public: Create a public model Default is false.
:type public: bool
:param project: Project to which to model belongs
:type project: str
:param parent: Parent model
:type parent: str
:param task: Associated task ID
:type task: str
"""
_service = "models"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"design": {
"additionalProperties": True,
"description": "Json[d] object representing the model design. Should be identical to the network design of the task which created the model",
"type": "object",
},
"framework": {
"description": "Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.",
"type": "string",
},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object",
"type": "object",
},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"parent": {"description": "Parent model", "type": "string"},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"public": {
"default": False,
"description": "Create a public model Default is false.",
"type": "boolean",
},
"ready": {
"default": False,
"description": "Indication if the model is final and can be used by other tasks. Default is false.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"uri": {"description": "URI for the model", "type": "string"},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
},
},
"required": ["uri", "name"],
"type": "object",
}
def __init__(
self,
uri: str,
name: str,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
ready: Optional[bool] = False,
public: Optional[bool] = False,
project: Optional[str] = None,
parent: Optional[str] = None,
task: Optional[str] = None,
metadata: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.ready = ready
self.public = public
self.project = project
self.parent = parent
self.task = task
self.metadata = metadata
@schema_property("uri")
def uri(self) -> str:
return self._property_uri
@uri.setter
def uri(self, value: str) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("public")
def public(self) -> Optional[bool]:
return self._property_public
@public.setter
def public(self, value: Optional[bool]) -> None:
if value is None:
self._property_public = None
return
self.assert_isinstance(value, "public", (bool,))
self._property_public = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetadataItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metadata", MetadataItem, is_array=True)
self._property_metadata = value
|
CreateRequest
|
python
|
pytorch__pytorch
|
torch/_subclasses/meta_utils.py
|
{
"start": 18782,
"end": 19305
}
|
class ____:
id: MetaStorageId
size: int
# NB: this is only populated with copy_data True, it is not directly
# serializable in JSON, you want to do something special here anyway
data: Optional[torch.UntypedStorage]
def as_json(self, describer_id: _DescriberId) -> dict[str, object]:
return {
"id": self.id,
"describer_id": describer_id,
"size": self.size if isinstance(self.size, int) else repr(self.size),
}
@dataclass(frozen=True)
|
MetaStorageDesc
|
python
|
Pylons__pyramid
|
tests/test_csrf.py
|
{
"start": 7758,
"end": 9436
}
|
class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
# set up CSRF
self.config.set_default_csrf_options(require_csrf=False)
def _callFUT(self, *args, **kwargs):
from pyramid.csrf import check_csrf_token
return check_csrf_token(*args, **kwargs)
def test_success_token(self):
request = testing.DummyRequest()
request.method = "POST"
request.POST = {'csrf_token': request.session.get_csrf_token()}
self.assertEqual(self._callFUT(request, token='csrf_token'), True)
def test_success_header(self):
request = testing.DummyRequest()
request.headers['X-CSRF-Token'] = request.session.get_csrf_token()
self.assertEqual(self._callFUT(request, header='X-CSRF-Token'), True)
def test_success_default_token(self):
request = testing.DummyRequest()
request.method = "POST"
request.POST = {'csrf_token': request.session.get_csrf_token()}
self.assertEqual(self._callFUT(request), True)
def test_success_default_header(self):
request = testing.DummyRequest()
request.headers['X-CSRF-Token'] = request.session.get_csrf_token()
self.assertEqual(self._callFUT(request), True)
def test_failure_raises(self):
from pyramid.exceptions import BadCSRFToken
request = testing.DummyRequest()
self.assertRaises(BadCSRFToken, self._callFUT, request, 'csrf_token')
def test_failure_no_raises(self):
request = testing.DummyRequest()
result = self._callFUT(request, 'csrf_token', raises=False)
self.assertEqual(result, False)
|
Test_check_csrf_token
|
python
|
google__pytype
|
pytype/tests/test_unions.py
|
{
"start": 2318,
"end": 6201
}
|
class ____(test_base.BaseTest):
"""Tests for the A | B | ... type union syntax."""
def test_basic(self):
ty = self.Infer("""
x: int | str
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
x: Union[int, str]
""",
)
def test_chained(self):
ty = self.Infer("""
class A: pass
class B: pass
x: int | str | A | B
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
x: Union[int, str, A, B]
class A: ...
class B: ...
""",
)
def test_none(self):
ty = self.Infer("""
x: int | str | None
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional, Union
x: Optional[Union[int, str]]
""",
)
def test_mixed(self):
ty = self.Infer("""
from typing import Union
class A: pass
class B: pass
x: int | str | Union[A, B]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
x: Union[int, str, A, B]
class A: ...
class B: ...
""",
)
def test_forward_ref(self):
ty = self.Infer("""
from typing import Union
class A: pass
x: 'int | str | A | B'
class B: pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
x: Union[int, str, A, B]
class A: ...
class B: ...
""",
)
def test_non_type(self):
self.Check("""
x = __any_object__ | __any_object__
y = __any_object__ | __any_object__
for z in (x, y):
pass
""")
def test_unsupported_late_annotation(self):
"""Don't allow partial late annotations."""
# TODO(b/240617766): missing-parameter is the wrong error.
self.CheckWithErrors("""
a: int | 'str' = 0 # invalid-annotation # missing-parameter
b: 'Bar' | int = 0 # invalid-annotation # unsupported-operands
c: 'Foo' | 'Bar' = 0 # invalid-annotation # unsupported-operands
""")
def test_unsupported_operands(self):
"""Don't treat assignments to | expressions as annotations."""
# TODO(b/240617766): missing-parameter is the wrong error.
self.CheckWithErrors("""
a = int | 'str' # missing-parameter
b = 'Bar' | int # unsupported-operands
c = 'Foo' | 'Bar' # unsupported-operands
""")
def test_assign_union_to_var(self):
errors = self.CheckWithErrors("""
from typing import Union
def f1() -> Union[bytes, int]:
return 1
def f2() -> Union[str, int]:
return 1
def f3() -> Union[int, str]:
return 1
x1: str = f1() # annotation-type-mismatch[e1b] # annotation-type-mismatch[e1i]
x2: str = f2() # annotation-type-mismatch[e2]
x3: str = f3() # annotation-type-mismatch[e3]
""")
self.assertErrorRegexes(
errors,
{
"e1b": r"str.*bytes.*Union\[bytes, int\]",
"e1i": r"str.*int.*Union\[bytes, int\]",
"e2": r"str.*int.*Union\[int, str\]",
"e3": r"str.*int.*Union\[int, str\]",
},
)
def test_assign_union_as_arg(self):
errors = self.CheckWithErrors("""
from typing import Union
def f1() -> Union[bytes, int]:
return 1
def f2() -> Union[str, int]:
return 1
def f3() -> Union[int, str]:
return 1
def g(x: str) -> None:
pass
g(f1()) # wrong-arg-types[e1]
g(f2()) # TODO(b/365533163): This should fail since Union[str, int] is not assignable to str.
g(f3()) # TODO(b/365533163): This should fail since Union[int, str] is not assignable to str.
""")
self.assertErrorRegexes(
errors,
{
"e1": r"str.*Union\[bytes, int\]",
},
)
if __name__ == "__main__":
test_base.main()
|
UnionOrTest
|
python
|
scipy__scipy
|
scipy/stats/tests/test_morestats.py
|
{
"start": 16588,
"end": 26159
}
|
class ____:
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0021, atol=0.00025)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0020, atol=0.00025)
@pytest.mark.xslow
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
samples = (t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14)
Tk, tm, p = stats.anderson_ksamp(samples, midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
rng = np.random.default_rng(6989860141921615054)
method = stats.PermutationMethod(n_resamples=9999, rng=rng)
res = stats.anderson_ksamp(samples, midrank=False, method=method)
assert_array_equal(res.statistic, Tk)
assert_array_equal(res.critical_values, tm)
assert_allclose(res.pvalue, p, atol=6e-4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_R_kSamples(self):
# test values generates with R package kSamples
# package version 1.2-6 (2017-06-14)
# r1 = 1:100
# continuous case (no ties) --> version 1
# res <- kSamples::ad.test(r1, r1 + 40.5)
# res$ad[1, "T.AD"] # 41.105
# res$ad[1, " asympt. P-value"] # 5.8399e-18
#
# discrete case (ties allowed) --> version 2 (here: midrank=True)
# res$ad[2, "T.AD"] # 41.235
#
# res <- kSamples::ad.test(r1, r1 + .5)
# res$ad[1, "T.AD"] # -1.2824
# res$ad[1, " asympt. P-value"] # 1
# res$ad[2, "T.AD"] # -1.2944
#
# res <- kSamples::ad.test(r1, r1 + 7.5)
# res$ad[1, "T.AD"] # 1.4923
# res$ad[1, " asympt. P-value"] # 0.077501
#
# res <- kSamples::ad.test(r1, r1 + 6)
# res$ad[2, "T.AD"] # 0.63892
# res$ad[2, " asympt. P-value"] # 0.17981
#
# res <- kSamples::ad.test(r1, r1 + 11.5)
# res$ad[1, "T.AD"] # 4.5042
# res$ad[1, " asympt. P-value"] # 0.00545
#
# res <- kSamples::ad.test(r1, r1 + 13.5)
# res$ad[1, "T.AD"] # 6.2982
# res$ad[1, " asympt. P-value"] # 0.00118
x1 = np.linspace(1, 100, 100)
# test case: different distributions;p-value floored at 0.001
# test case for issue #5493 / #8536
with warnings.catch_warnings():
warnings.filterwarnings("ignore", 'p-value floored', UserWarning)
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
assert_almost_equal(s, 41.105, 3)
assert_equal(p, 0.001)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", 'p-value floored', UserWarning)
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
assert_almost_equal(s, 41.235, 3)
assert_equal(p, 0.001)
# test case: similar distributions --> p-value capped at 0.25
with warnings.catch_warnings():
warnings.filterwarnings("ignore", 'p-value capped', UserWarning)
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
assert_almost_equal(s, -1.2824, 4)
assert_equal(p, 0.25)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", 'p-value capped', UserWarning)
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
assert_almost_equal(s, -1.2944, 4)
assert_equal(p, 0.25)
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
assert_almost_equal(s, 1.4923, 4)
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
assert_almost_equal(s, 0.6389, 4)
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
# test extended critical values for p=0.001 and p=0.005
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
assert_almost_equal(s, 4.5042, 4)
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
assert_almost_equal(s, 6.2982, 4)
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
res = stats.anderson_ksamp((t1, t2), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
assert_equal(res.significance_level, res.pvalue)
@make_xp_test_case(stats.ansari)
|
TestAndersonKSamp
|
python
|
streamlit__streamlit
|
lib/streamlit/web/server/upload_file_request_handler.py
|
{
"start": 1105,
"end": 5822
}
|
class ____(tornado.web.RequestHandler):
"""Implements the POST /upload_file endpoint."""
def initialize(
self,
file_mgr: MemoryUploadedFileManager,
is_active_session: Callable[[str], bool],
) -> None:
"""
Parameters
----------
file_mgr : UploadedFileManager
The server's singleton UploadedFileManager. All file uploads
go here.
is_active_session:
A function that returns true if a session_id belongs to an active
session.
"""
self._file_mgr = file_mgr
self._is_active_session = is_active_session
def set_default_headers(self) -> None:
self.set_header("Access-Control-Allow-Methods", "PUT, OPTIONS, DELETE")
self.set_header("Access-Control-Allow-Headers", "Content-Type")
if is_xsrf_enabled():
self.set_header(
"Access-Control-Allow-Origin",
server_util.get_url(config.get_option("browser.serverAddress")),
)
self.set_header("Access-Control-Allow-Headers", "X-Xsrftoken, Content-Type")
self.set_header("Vary", "Origin")
self.set_header("Access-Control-Allow-Credentials", "true")
elif routes.allow_all_cross_origin_requests():
self.set_header("Access-Control-Allow-Origin", "*")
elif routes.is_allowed_origin(origin := self.request.headers.get("Origin")):
self.set_header("Access-Control-Allow-Origin", cast("str", origin))
def options(self, **kwargs: Any) -> None:
"""/OPTIONS handler for preflight CORS checks.
When a browser is making a CORS request, it may sometimes first
send an OPTIONS request, to check whether the server understands the
CORS protocol. This is optional, and doesn't happen for every request
or in every browser. If an OPTIONS request does get sent, and is not
then handled by the server, the browser will fail the underlying
request.
The proper way to handle this is to send a 204 response ("no content")
with the CORS headers attached. (These headers are automatically added
to every outgoing response, including OPTIONS responses,
via set_default_headers().)
See https://developer.mozilla.org/en-US/docs/Glossary/Preflight_request
"""
self.set_status(204)
self.finish()
def put(self, **kwargs: Any) -> None:
"""Receive an uploaded file and add it to our UploadedFileManager."""
args: dict[str, list[bytes]] = {}
files: dict[str, list[Any]] = {}
if not self.path_kwargs:
# This is not expected to happen with normal Streamlit usage.
self.send_error(
400,
reason="No path arguments provided. Please provide a session_id and file_id in the URL.",
)
return
session_id = self.path_kwargs["session_id"]
file_id = self.path_kwargs["file_id"]
tornado.httputil.parse_body_arguments(
content_type=self.request.headers["Content-Type"],
body=self.request.body,
arguments=args,
files=files,
)
try:
if not self._is_active_session(session_id):
self.send_error(400, reason="Invalid session_id")
return
except Exception as ex:
self.send_error(400, reason=str(ex))
return
uploaded_files: list[UploadedFileRec] = []
for flist in files.values():
uploaded_files.extend(
UploadedFileRec(
file_id=file_id,
name=file["filename"],
type=file["content_type"],
data=file["body"],
)
for file in flist
)
if len(uploaded_files) != 1:
self.send_error(
400, reason=f"Expected 1 file, but got {len(uploaded_files)}"
)
return
self._file_mgr.add_file(session_id=session_id, file=uploaded_files[0])
self.set_status(204)
def delete(self, **kwargs: Any) -> None:
"""Delete file request handler."""
if not self.path_kwargs:
self.send_error(
400,
reason="No path arguments provided. Please provide a session_id and file_id in the URL.",
)
return
session_id = self.path_kwargs["session_id"]
file_id = self.path_kwargs["file_id"]
self._file_mgr.remove_file(session_id=session_id, file_id=file_id)
self.set_status(204)
|
UploadFileRequestHandler
|
python
|
tensorflow__tensorflow
|
tensorflow/python/trackable/resource.py
|
{
"start": 1196,
"end": 2427
}
|
class ____:
"""An object that tracks a list of resources."""
__slots__ = ["_resources"]
def __init__(self):
self._resources = []
@property
def resources(self):
return self._resources
def add_resource(self, resource):
self._resources.append(resource)
@tf_contextlib.contextmanager
def resource_tracker_scope(resource_tracker):
"""A context to manage resource trackers.
Use this in order to collect up all resources created within a block of code.
Example usage:
```python
resource_tracker = ResourceTracker()
with resource_tracker_scope(resource_tracker):
resource = TrackableResource()
assert resource_tracker.resources == [resource]
Args:
resource_tracker: The passed in ResourceTracker object
Yields:
A scope in which the resource_tracker is active.
"""
global _RESOURCE_TRACKER_STACK
old = list(_RESOURCE_TRACKER_STACK)
_RESOURCE_TRACKER_STACK.append(resource_tracker)
try:
yield
finally:
_RESOURCE_TRACKER_STACK = old
def _make_getter(captured_getter, captured_previous):
"""To avoid capturing loop variables."""
def getter(*args, **kwargs):
return captured_getter(captured_previous, *args, **kwargs)
return getter
|
ResourceTracker
|
python
|
Textualize__textual
|
docs/examples/tutorial/stopwatch.py
|
{
"start": 1468,
"end": 2306
}
|
class ____(HorizontalGroup):
"""A stopwatch widget."""
def on_button_pressed(self, event: Button.Pressed) -> None:
"""Event handler called when a button is pressed."""
button_id = event.button.id
time_display = self.query_one(TimeDisplay)
if button_id == "start":
time_display.start()
self.add_class("started")
elif button_id == "stop":
time_display.stop()
self.remove_class("started")
elif button_id == "reset":
time_display.reset()
def compose(self) -> ComposeResult:
"""Create child widgets of a stopwatch."""
yield Button("Start", id="start", variant="success")
yield Button("Stop", id="stop", variant="error")
yield Button("Reset", id="reset")
yield TimeDisplay()
|
Stopwatch
|
python
|
tensorflow__tensorflow
|
tensorflow/python/grappler/item_test.py
|
{
"start": 1361,
"end": 4737
}
|
class ____(test.TestCase):
def testInvalidItem(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(10)
b = constant_op.constant(20)
c = a + b # pylint: disable=unused-variable
mg = meta_graph.create_meta_graph_def(graph=g)
# The train op isn't specified: this should raise an InvalidArgumentError
# exception.
with self.assertRaises(errors_impl.InvalidArgumentError):
item.Item(mg)
def testImportantOps(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(10)
b = constant_op.constant(20)
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_list = grappler_item.IdentifyImportantOps()
self.assertItemsEqual(['Const', 'Const_1', 'add'], op_list)
def testOpProperties(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(10)
b = constant_op.constant(20)
c = a + b
z = control_flow_ops.no_op()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
# All the nodes in this model have one scalar output
for node in grappler_item.metagraph.graph_def.node:
node_prop = op_properties[node.name]
if node.name == z.name:
self.assertEqual(0, len(node_prop))
else:
self.assertEqual(1, len(node_prop))
self.assertEqual(dtypes.int32, node_prop[0].dtype)
self.assertEqual(tensor_shape.TensorShape([]), node_prop[0].shape)
def testUpdates(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(10)
b = constant_op.constant(20)
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
initial_tf_item = grappler_item.tf_item
no_change_tf_item = grappler_item.tf_item
self.assertEqual(initial_tf_item, no_change_tf_item)
# Modify the placement.
for node in grappler_item.metagraph.graph_def.node:
node.device = '/cpu:0'
new_tf_item = grappler_item.tf_item
self.assertNotEqual(initial_tf_item, new_tf_item)
# Assign the same placement.
for node in grappler_item.metagraph.graph_def.node:
node.device = '/cpu:0'
newest_tf_item = grappler_item.tf_item
self.assertEqual(new_tf_item, newest_tf_item)
@test_util.run_v1_only('b/120545219')
def testColocationConstraints(self):
with ops.Graph().as_default() as g:
c = constant_op.constant([10])
v = variable_v1.VariableV1([3], dtype=dtypes.int32)
i = gen_array_ops.ref_identity(v)
a = state_ops.assign(i, c)
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(a)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
groups = grappler_item.GetColocationGroups()
self.assertEqual(len(groups), 1)
self.assertItemsEqual(
groups[0], ['Assign', 'RefIdentity', 'Variable', 'Variable/Assign'])
if __name__ == '__main__':
test.main()
|
ItemTest
|
python
|
explosion__spaCy
|
spacy/lang/ja/__init__.py
|
{
"start": 1105,
"end": 7309
}
|
class ____(DummyTokenizer):
def __init__(self, vocab: Vocab, split_mode: Optional[str] = None) -> None:
self.vocab = vocab
self.split_mode = split_mode
self.tokenizer = try_sudachi_import(self.split_mode)
# if we're using split mode A we don't need subtokens
self.need_subtokens = not (split_mode is None or split_mode == "A")
def __reduce__(self):
return JapaneseTokenizer, (self.vocab, self.split_mode)
def __call__(self, text: str) -> Doc:
# convert sudachipy.morpheme.Morpheme to DetailedToken and merge continuous spaces
sudachipy_tokens = self.tokenizer.tokenize(text)
dtokens = self._get_dtokens(sudachipy_tokens)
dtokens, spaces = get_dtokens_and_spaces(dtokens, text)
# create Doc with tag bi-gram based part-of-speech identification rules
words, tags, inflections, lemmas, norms, readings, sub_tokens_list = (
zip(*dtokens) if dtokens else [[]] * 7
)
sub_tokens_list = list(sub_tokens_list)
doc = Doc(self.vocab, words=words, spaces=spaces)
next_pos = None # for bi-gram rules
for idx, (token, dtoken) in enumerate(zip(doc, dtokens)):
token.tag_ = dtoken.tag
if next_pos: # already identified in previous iteration
token.pos = next_pos
next_pos = None
else:
token.pos, next_pos = resolve_pos(
token.orth_,
dtoken.tag,
tags[idx + 1] if idx + 1 < len(tags) else None,
)
# if there's no lemma info (it's an unk) just use the surface
token.lemma_ = dtoken.lemma if dtoken.lemma else dtoken.surface
morph = {}
if dtoken.inf:
# it's normal for this to be empty for non-inflecting types
morph["Inflection"] = dtoken.inf
token.norm_ = dtoken.norm
if dtoken.reading:
# punctuation is its own reading, but we don't want values like
# "=" here
morph["Reading"] = re.sub("[=|]", "_", dtoken.reading)
token.morph = MorphAnalysis(self.vocab, morph)
if self.need_subtokens:
doc.user_data["sub_tokens"] = sub_tokens_list
return doc
def _get_dtokens(self, sudachipy_tokens, need_sub_tokens: bool = True):
sub_tokens_list = (
self._get_sub_tokens(sudachipy_tokens) if need_sub_tokens else None
)
dtokens = [
DetailedToken(
token.surface(), # orth
"-".join([xx for xx in token.part_of_speech()[:4] if xx != "*"]), # tag
";".join([xx for xx in token.part_of_speech()[4:] if xx != "*"]), # inf
token.dictionary_form(), # lemma
token.normalized_form(),
token.reading_form(),
(
sub_tokens_list[idx] if sub_tokens_list else None
), # user_data['sub_tokens']
)
for idx, token in enumerate(sudachipy_tokens)
if len(token.surface()) > 0
# remove empty tokens which can be produced with characters like … that
]
# Sudachi normalizes internally and outputs each space char as a token.
# This is the preparation for get_dtokens_and_spaces() to merge the continuous space tokens
return [
t
for idx, t in enumerate(dtokens)
if idx == 0
or not t.surface.isspace()
or t.tag != "空白"
or not dtokens[idx - 1].surface.isspace()
or dtokens[idx - 1].tag != "空白"
]
def _get_sub_tokens(self, sudachipy_tokens):
# do nothing for default split mode
if not self.need_subtokens:
return None
sub_tokens_list = [] # list of (list of list of DetailedToken | None)
for token in sudachipy_tokens:
sub_a = token.split(self.tokenizer.SplitMode.A)
if len(sub_a) == 1: # no sub tokens
sub_tokens_list.append(None)
elif self.split_mode == "B":
sub_tokens_list.append([self._get_dtokens(sub_a, False)])
else: # "C"
sub_b = token.split(self.tokenizer.SplitMode.B)
if len(sub_a) == len(sub_b):
dtokens = self._get_dtokens(sub_a, False)
sub_tokens_list.append([dtokens, dtokens])
else:
sub_tokens_list.append(
[
self._get_dtokens(sub_a, False),
self._get_dtokens(sub_b, False),
]
)
return sub_tokens_list
def score(self, examples):
validate_examples(examples, "JapaneseTokenizer.score")
return Scorer.score_tokenization(examples)
def _get_config(self) -> Dict[str, Any]:
return {"split_mode": self.split_mode}
def _set_config(self, config: Dict[str, Any] = {}) -> None:
self.split_mode = config.get("split_mode", None)
def to_bytes(self, **kwargs) -> bytes:
serializers = {"cfg": lambda: srsly.json_dumps(self._get_config())}
return util.to_bytes(serializers, [])
def from_bytes(self, data: bytes, **kwargs) -> "JapaneseTokenizer":
deserializers = {"cfg": lambda b: self._set_config(srsly.json_loads(b))}
util.from_bytes(data, deserializers, [])
self.tokenizer = try_sudachi_import(self.split_mode)
return self
def to_disk(self, path: Union[str, Path], **kwargs) -> None:
path = util.ensure_path(path)
serializers = {"cfg": lambda p: srsly.write_json(p, self._get_config())}
util.to_disk(path, serializers, [])
def from_disk(self, path: Union[str, Path], **kwargs) -> "JapaneseTokenizer":
path = util.ensure_path(path)
serializers = {"cfg": lambda p: self._set_config(srsly.read_json(p))}
util.from_disk(path, serializers, [])
self.tokenizer = try_sudachi_import(self.split_mode)
return self
|
JapaneseTokenizer
|
python
|
walkccc__LeetCode
|
solutions/709. To Lower Case/709.py
|
{
"start": 0,
"end": 134
}
|
class ____:
def toLowerCase(self, str: str) -> str:
return ''.join(chr(ord(c) + 32) if 'A' <= c <= 'Z' else c for c in str)
|
Solution
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.